misc

This commit is contained in:
parent 7dda00b64a
commit c0872248ab

.gitignore (vendored): 12 changed lines
@@ -31,14 +31,8 @@ cache/ETH_USDT_1m_candles.csv
 models/trading_agent_best_pnl.pt
 models/trading_agent_best_reward.pt
 models/trading_agent_final.pt
-NN/__pycache__/realtime_main.cpython-312.pyc
-NN/__pycache__/realtime-main.cpython-312.pyc
-NN/models/__pycache__/__init__.cpython-312.pyc
-NN/models/__pycache__/cnn_model_pytorch.cpython-312.pyc
-NN/models/__pycache__/cnn_model.cpython-312.pyc
-NN/models/__pycache__/transformer_model_pytorch.cpython-312.pyc
-NN/utils/__pycache__/data_interface.cpython-312.pyc
-NN/utils/__pycache__/multi_data_interface.cpython-312.pyc
-NN/utils/__pycache__/realtime_analyzer.cpython-312.pyc
 models/trading_agent_best_pnl.pt
 *.log
+NN/models/saved/hybrid_stats_20250409_022901.json
+*__pycache__*
+*.png
.vscode/launch.json (vendored): 66 changed lines
@@ -1,13 +1,23 @@
 {
     "version": "0.2.0",
     "configurations": [
+        {
+            "name": "Kill Stale Processes",
+            "type": "python",
+            "request": "launch",
+            "program": "utils/port_manager.py",
+            "args": ["--kill-stale"],
+            "console": "integratedTerminal",
+            "justMyCode": true
+        },
         {
             "name": "Python Debugger: Current File",
             "type": "debugpy",
             "request": "launch",
             // "program": "realtime.py",
             "program": "${file}",
-            "console": "integratedTerminal"
+            "console": "integratedTerminal",
+            "preLaunchTask": "Kill Stale Processes"
         },
         {
             "name": "Train Bot",
@@ -21,7 +31,8 @@
                 "100"
             ],
             "console": "integratedTerminal",
-            "justMyCode": true
+            "justMyCode": true,
+            "preLaunchTask": "Kill Stale Processes"
         },
         {
             "name": "Evaluate Bot",
@@ -35,7 +46,8 @@
                 "10"
             ],
             "console": "integratedTerminal",
-            "justMyCode": true
+            "justMyCode": true,
+            "preLaunchTask": "Kill Stale Processes"
         },
         {
             "name": "Live Trading (Demo)",
@@ -56,7 +68,8 @@
             "justMyCode": true,
             "env": {
                 "PYTHONUNBUFFERED": "1"
-            }
+            },
+            "preLaunchTask": "Kill Stale Processes"
         },
         {
             "name": "Live Trading (Real)",
@@ -79,7 +92,8 @@
             "justMyCode": true,
             "env": {
                 "PYTHONUNBUFFERED": "1"
-            }
+            },
+            "preLaunchTask": "Kill Stale Processes"
         },
         {
             "name": "Live Trading (BTC Futures)",
@@ -102,7 +116,8 @@
             "justMyCode": true,
             "env": {
                 "PYTHONUNBUFFERED": "1"
-            }
+            },
+            "preLaunchTask": "Kill Stale Processes"
         },
         {
             "name": "NN Training Pipeline",
@@ -136,6 +151,7 @@
                 "TF_CPP_MIN_LOG_LEVEL": "2"
             },
             "pythonArgs": ["-c", "import sys; sys.path.append('f:/projects/gogo2')"],
+            "preLaunchTask": "Kill Stale Processes",
             "postDebugTask": "Start TensorBoard"
         },
         {
@@ -151,7 +167,43 @@
                 "NN_INFERENCE_INTERVAL": "60",
                 "NN_MODEL_TYPE": "cnn",
                 "NN_TIMEFRAME": "1h"
-            }
+            },
+            "preLaunchTask": "Kill Stale Processes"
+        },
+        {
+            "name": "TRAIN Realtime Charts with NN Inference",
+            "type": "python",
+            "request": "launch",
+            "program": "train_rl_with_realtime.py",
+            "args": [
+                "--episodes",
+                "100",
+                "--max-position",
+                "0.1"
+            ],
+            "console": "integratedTerminal",
+            "justMyCode": true,
+            "env": {
+                "PYTHONUNBUFFERED": "1",
+                "ENABLE_NN_MODELS": "1",
+                "NN_INFERENCE_INTERVAL": "60",
+                "NN_MODEL_TYPE": "cnn",
+                "NN_TIMEFRAME": "1h"
+            },
+            "preLaunchTask": "Kill Stale Processes"
+        },
+        {
+            "name": "TensorBoard (Auto Port)",
+            "type": "python",
+            "request": "launch",
+            "program": "utils/launch_tensorboard.py",
+            "args": [
+                "--logdir=NN/models/saved/logs",
+                "--preferred-port=6007",
+                "--port-range=6000-7000"
+            ],
+            "console": "integratedTerminal",
+            "justMyCode": true
         }
     ]
 }
.vscode/tasks.json (vendored): 23 changed lines
@@ -6,11 +6,11 @@
             "type": "shell",
             "command": "python",
             "args": [
-                "-m",
-                "tensorboard.main",
+                "utils/launch_tensorboard.py",
                 "--logdir=NN/models/saved/logs",
-                "--port=6006",
-                "--host=localhost"
+                "--preferred-port=6007",
+                "--port-range=6000-7000",
+                "--kill-stale"
             ],
             "isBackground": true,
             "problemMatcher": {
@@ -23,7 +23,7 @@
                 "background": {
                     "activeOnStart": true,
                     "beginsPattern": ".*TensorBoard.*",
-                    "endsPattern": ".*TensorBoard.*"
+                    "endsPattern": ".*TensorBoard available at.*"
                 }
             },
             "presentation": {
@@ -33,6 +33,19 @@
             "runOptions": {
                 "runOn": "folderOpen"
             }
+        },
+        {
+            "label": "Kill Stale Processes",
+            "type": "shell",
+            "command": "python",
+            "args": [
+                "utils/port_manager.py",
+                "--kill-stale"
+            ],
+            "presentation": {
+                "reveal": "always",
+                "panel": "shared"
+            }
         }
     ]
 }
HYBRID_TRAINING_GUIDE.md (new file): 113 lines
@@ -0,0 +1,113 @@
# Hybrid Training Guide for GOGO2 Trading System

This guide explains how to run the hybrid training system, which combines supervised learning (CNN) and reinforcement learning (DQN) for the trading system.

## Overview

The hybrid training approach combines:

1. **Supervised Learning**: CNN models learn patterns from historical market data
2. **Reinforcement Learning**: a DQN agent optimizes the actual trading decisions

This combined approach leverages the strengths of both learning paradigms:

- CNNs are good at pattern recognition in market data
- RL is better suited to sequential decision-making and optimizing trading strategies

## Fixed Version

We created `train_hybrid_fixed.py` to address several issues with the original implementation (a sketch of the first two fixes appears after this list):

1. **Device Compatibility**: Forces CPU usage to avoid CUDA/device mismatch errors
2. **Error Handling**: Adds better error recovery during model initialization and training
3. **Data Processing**: Improves data formatting for both the CNN and DQN models
4. **Asynchronous Execution**: Removes async/await code for simpler execution
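The following is a minimal sketch of what the first two fixes could look like; it is illustrative only and assumes the models are ordinary `torch.nn.Module` classes (the actual code in `train_hybrid_fixed.py` may differ).

```python
# Illustrative sketch only; not the exact code from train_hybrid_fixed.py.
import logging
import torch

logger = logging.getLogger(__name__)

# 1. Force CPU to sidestep CUDA/device mismatch errors
device = torch.device("cpu")

def safe_init(model_cls, *args, **kwargs):
    """2. Recover from model initialization failures instead of crashing."""
    try:
        model = model_cls(*args, **kwargs)
        return model.to(device)
    except Exception as exc:
        logger.error("Model initialization failed: %s", exc)
        return None
```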
## Running the Training

```bash
python train_hybrid_fixed.py [OPTIONS]
```

### Command Line Options

| Option | Description | Default |
|--------|-------------|---------|
| `--iterations` | Number of hybrid iterations to run | 10 |
| `--sv-epochs` | Supervised learning epochs per iteration | 5 |
| `--rl-episodes` | RL episodes per iteration | 2 |
| `--symbol` | Trading symbol | BTC/USDT |
| `--timeframes` | Comma-separated timeframes | 1m,5m,15m |
| `--window` | Window size for state construction | 24 |
| `--batch-size` | Batch size for training | 64 |
| `--new-model` | Start with new models (don't load existing) | false |

### Example

For a quick test run:

```bash
python train_hybrid_fixed.py --iterations 2 --sv-epochs 1 --rl-episodes 1 --new-model --batch-size 32
```

For a full training session:

```bash
python train_hybrid_fixed.py --iterations 20 --sv-epochs 5 --rl-episodes 2 --batch-size 64
```

## Training Output

The training produces several outputs (a quick way to inspect them is sketched after this list):

1. **Model Files**:
   - `NN/models/saved/supervised_model_best.pt` - Best CNN model
   - `NN/models/saved/rl_agent_best_policy.pt` - Best RL agent policy network
   - `NN/models/saved/rl_agent_best_target.pt` - Best RL agent target network
   - `NN/models/saved/rl_agent_best_agent_state.pt` - RL agent state

2. **Statistics**:
   - `NN/models/saved/hybrid_stats_[timestamp].json` - Training statistics
   - `NN/models/saved/hybrid_stats_latest.json` - Latest training statistics

3. **TensorBoard Logs**:
   - Located in the `runs/` directory
   - View with: `tensorboard --logdir=runs`
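For a quick sanity check of these artifacts, something like the following works; note that whether the `.pt` files hold full models or `state_dict`s depends on how they were saved, so this only inspects the loaded objects.

```python
# Illustrative inspection of the outputs listed above (not part of the training code).
import json
import torch

with open("NN/models/saved/hybrid_stats_latest.json") as f:
    stats = json.load(f)
print(sorted(stats.keys()))  # See which statistics were recorded

checkpoint = torch.load("NN/models/saved/supervised_model_best.pt", map_location="cpu")
print(type(checkpoint))  # nn.Module or dict of tensors, depending on how it was saved
```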
## Known Issues

1. **Supervised Learning Error (FIXED)**: The dimension mismatch issue in the CNN model has been resolved. The fix involves:
   - Properly passing the total feature count to the CNN model during initialization
   - Updating the forward pass to handle different input dimensions without rebuilding layers
   - Adding adaptive padding/truncation to handle tensor shape mismatches (see the sketch after this list)
   - Logging and monitoring input shapes for better diagnostics

2. **Data Fetching Warnings**: The system shows warnings about fetching data from Binance. This is expected in the test environment and doesn't affect training, as cached data is used.
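The adaptive padding/truncation mentioned above can be pictured roughly as follows; this is a sketch of the idea, not the exact code in the CNN model.

```python
# Sketch only: reconcile an incoming feature dimension with the width the
# first layer was built for, instead of rebuilding layers.
import torch
import torch.nn.functional as F

def fit_feature_dim(x: torch.Tensor, expected_features: int) -> torch.Tensor:
    actual = x.shape[-1]
    if actual < expected_features:
        # Pad missing features with zeros on the right of the last dimension
        return F.pad(x, (0, expected_features - actual))
    if actual > expected_features:
        # Truncate extra features
        return x[..., :expected_features]
    return x
```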
## Next Steps

1. ~~Fix the supervised learning data formatting issue~~ ✅ Done
2. Implement additional metrics tracking and visualization
3. Add early stopping based on combined performance
4. Add support for multi-pair training
5. Implement model export for live trading

## Latest Improvements

The following issues have been addressed in the most recent update:

1. **Fixed CNN Model Dimension Mismatch**: Corrected initialization parameters for the CNNModelPyTorch class and modified how it handles input dimensions.
2. **Adaptive Feature Handling**: Instead of rebuilding network layers when feature counts don't match, the model now adaptively handles mismatches by padding or truncating tensors.
3. **Better Input Shape Logging**: Added detailed logging of tensor shapes to help diagnose dimension issues.
4. **Validation Data Handling**: Added an automatic train/validation split when validation data is missing (see the sketch after this list).
5. **Error Recovery**: Added defensive programming to handle missing keys in statistics dictionaries.
6. **Device Management**: Improved device management to ensure all tensors and models are on the correct device.
7. **Custom Training Loop**: Implemented a custom training loop for supervised learning to better control the process.
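A minimal version of the automatic split from item 4 might look like this; the actual ratio and helper name in the code base may differ. The split is time-ordered (no shuffling), which is usually the safer choice for market data.

```python
# Illustrative fallback when no validation set is supplied.
def train_val_split(X, y, val_fraction=0.2):
    split = int(len(X) * (1 - val_fraction))
    return (X[:split], y[:split]), (X[split:], y[split:])
```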
## Development Notes

- The RL component is working correctly and training successfully
- ~~The primary issue is with CNN model input dimensions~~ - This issue has been fixed by:
  - Aligning the feature count between initialization and training data preparation
  - Adapting the forward pass to handle dimension mismatches gracefully
  - Adding input validation to prevent crashes during training
- We're successfully saving models and statistics
- TensorBoard logging is enabled for monitoring training progress
- The hybrid model now correctly processes both supervised and reinforcement learning components
- The system now gracefully handles errors and recovers from common issues
Binary file not shown.
NN/environments/__init__.py (new file): 6 lines
@@ -0,0 +1,6 @@
# Trading environments for reinforcement learning
# This module contains environments for training trading agents

from NN.environments.trading_env import TradingEnvironment

__all__ = ['TradingEnvironment']
NN/environments/trading_env.py (new file): 484 lines
@@ -0,0 +1,484 @@
import numpy as np
import pandas as pd
from typing import Dict, Tuple, List, Any, Optional
import logging
import gym
from gym import spaces
import random

# Configure logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class TradingEnvironment(gym.Env):
    """
    Trading environment implementing gym interface for reinforcement learning

    Actions:
    - 0: Buy
    - 1: Sell
    - 2: Hold

    State:
    - OHLCV data from multiple timeframes
    - Technical indicators
    - Position data
    """

    def __init__(
        self,
        data_interface,
        initial_balance: float = 10000.0,
        transaction_fee: float = 0.0002,
        window_size: int = 20,
        max_position: float = 1.0,
        reward_scaling: float = 1.0,
    ):
        """
        Initialize the trading environment.

        Args:
            data_interface: DataInterface instance to get market data
            initial_balance: Initial balance in the base currency
            transaction_fee: Fee for each transaction as a fraction of trade value
            window_size: Number of candles in the observation window
            max_position: Maximum position size as a fraction of balance
            reward_scaling: Scale factor for rewards
        """
        super().__init__()

        self.data_interface = data_interface
        self.initial_balance = initial_balance
        self.transaction_fee = transaction_fee
        self.window_size = window_size
        self.max_position = max_position
        self.reward_scaling = reward_scaling

        # Load data for primary timeframe (assuming the first one is primary)
        self.timeframe = self.data_interface.timeframes[0]
        self.reset_data()

        # Define action and observation spaces
        self.action_space = spaces.Discrete(3)  # Buy, Sell, Hold

        # For observation space, we consider multiple timeframes with OHLCV data
        # and additional features like technical indicators, position info, etc.
        n_timeframes = len(self.data_interface.timeframes)
        n_features = 5  # OHLCV data by default

        # Add additional features for position, balance, etc.
        additional_features = 3  # position, balance, unrealized_pnl

        # Calculate total feature dimension
        total_features = (n_timeframes * n_features * self.window_size) + additional_features

        self.observation_space = spaces.Box(
            low=-np.inf, high=np.inf, shape=(total_features,), dtype=np.float32
        )

        # Use tuple for state_shape that EnhancedCNN expects
        self.state_shape = (total_features,)

        # Initialize state
        self.reset()

    def reset_data(self):
        """Reset data and generate a new set of price data for training"""
        # Get data for each timeframe
        self.data = {}
        for tf in self.data_interface.timeframes:
            df = self.data_interface.dataframes[tf]
            if df is not None and not df.empty:
                self.data[tf] = df

        if not self.data:
            raise ValueError("No data available for training")

        # Use the primary timeframe for step count
        self.prices = self.data[self.timeframe]['close'].values
        self.timestamps = self.data[self.timeframe].index.values
        self.max_steps = len(self.prices) - self.window_size - 1

    def reset(self):
        """Reset the environment to initial state"""
        # Reset trading variables
        self.balance = self.initial_balance
        self.position = 0.0  # No position initially
        self.entry_price = 0.0
        self.total_pnl = 0.0
        self.trades = []
        self.rewards = []

        # Reset step counter
        self.current_step = self.window_size

        # Get initial observation
        observation = self._get_observation()

        return observation

    def step(self, action):
        """
        Take a step in the environment.

        Args:
            action: Action to take (0: Buy, 1: Sell, 2: Hold)

        Returns:
            tuple: (observation, reward, done, info)
        """
        # Get current state before taking action
        prev_balance = self.balance
        prev_position = self.position
        prev_price = self.prices[self.current_step]

        # Take action
        info = {}
        reward = 0
        last_position_info = None

        # Get current price
        current_price = self.prices[self.current_step]
        next_price = self.prices[self.current_step + 1] if self.current_step + 1 < len(self.prices) else current_price

        # Process the action
        if action == 0:  # Buy
            if self.position <= 0:  # Only buy if not already long
                # Close any existing short position
                if self.position < 0:
                    close_pnl, last_position_info = self._close_position(current_price)
                    reward += close_pnl * self.reward_scaling

                # Open new long position
                self._open_position(1.0 * self.max_position, current_price)
                logger.info(f"Buy at step {self.current_step}, price: {current_price:.4f}, position: {self.position:.6f}")

        elif action == 1:  # Sell
            if self.position >= 0:  # Only sell if not already short
                # Close any existing long position
                if self.position > 0:
                    close_pnl, last_position_info = self._close_position(current_price)
                    reward += close_pnl * self.reward_scaling

                # Open new short position
                self._open_position(-1.0 * self.max_position, current_price)
                logger.info(f"Sell at step {self.current_step}, price: {current_price:.4f}, position: {self.position:.6f}")

        elif action == 2:  # Hold
            # No action, but still calculate unrealized PnL for reward
            pass

        # Calculate unrealized PnL and add to reward
        if self.position != 0:
            unrealized_pnl = self._calculate_unrealized_pnl(next_price)
            reward += unrealized_pnl * self.reward_scaling * 0.1  # Scale down unrealized PnL

        # Apply penalties for holding a position
        if self.position != 0:
            # Small holding fee/interest
            holding_penalty = abs(self.position) * 0.0001  # 0.01% per step
            reward -= holding_penalty * self.reward_scaling

        # Move to next step
        self.current_step += 1

        # Get new observation
        observation = self._get_observation()

        # Check if episode is done
        done = self.current_step >= len(self.prices) - 1

        # If done, close any remaining positions
        if done and self.position != 0:
            final_pnl, last_position_info = self._close_position(current_price)
            reward += final_pnl * self.reward_scaling
            info['final_pnl'] = final_pnl
            info['final_balance'] = self.balance
            logger.info(f"Episode ended. Final balance: {self.balance:.4f}, Return: {(self.balance/self.initial_balance-1)*100:.2f}%")

        # Track trade result if position changed or position was closed
        if prev_position != self.position or last_position_info is not None:
            # Calculate realized PnL if position was closed
            realized_pnl = 0
            position_info = {}

            if last_position_info is not None:
                # Use the position information from closing
                realized_pnl = last_position_info['pnl']
                position_info = last_position_info
            else:
                # Calculate manually based on balance change
                realized_pnl = self.balance - prev_balance if prev_position != 0 else 0

            # Record detailed trade information
            trade_result = {
                'step': self.current_step,
                'timestamp': self.timestamps[self.current_step],
                'action': action,
                'action_name': ['BUY', 'SELL', 'HOLD'][action],
                'price': current_price,
                'position_changed': prev_position != self.position,
                'prev_position': prev_position,
                'new_position': self.position,
                'position_size': abs(self.position) if self.position != 0 else abs(prev_position),
                'entry_price': position_info.get('entry_price', self.entry_price),
                'exit_price': position_info.get('exit_price', current_price),
                'realized_pnl': realized_pnl,
                'unrealized_pnl': self._calculate_unrealized_pnl(current_price) if self.position != 0 else 0,
                'pnl': realized_pnl,  # Total PnL (realized for this step)
                'balance_before': prev_balance,
                'balance_after': self.balance,
                'trade_fee': position_info.get('fee', abs(self.position - prev_position) * current_price * self.transaction_fee)
            }
            info['trade_result'] = trade_result
            self.trades.append(trade_result)

            # Log trade details
            logger.info(f"Trade executed - Action: {['BUY', 'SELL', 'HOLD'][action]}, "
                        f"Price: {current_price:.4f}, PnL: {realized_pnl:.4f}, "
                        f"Balance: {self.balance:.4f}")

        # Store reward
        self.rewards.append(reward)

        # Update info dict with current state
        info.update({
            'step': self.current_step,
            'price': current_price,
            'prev_price': prev_price,
            'price_change': (current_price - prev_price) / prev_price if prev_price != 0 else 0,
            'balance': self.balance,
            'position': self.position,
            'entry_price': self.entry_price,
            'unrealized_pnl': self._calculate_unrealized_pnl(current_price) if self.position != 0 else 0.0,
            'total_trades': len(self.trades),
            'total_pnl': self.total_pnl,
            'return_pct': (self.balance/self.initial_balance-1)*100
        })

        return observation, reward, done, info

    def _calculate_unrealized_pnl(self, current_price):
        """Calculate unrealized PnL for current position"""
        if self.position == 0 or self.entry_price == 0:
            return 0.0

        if self.position > 0:  # Long position
            return self.position * (current_price / self.entry_price - 1.0)
        else:  # Short position
            return -self.position * (1.0 - current_price / self.entry_price)

    def _open_position(self, position_size, price):
        """Open a new position"""
        self.position = position_size
        self.entry_price = price

    def _close_position(self, price):
        """Close the current position and return PnL"""
        pnl = self._calculate_unrealized_pnl(price)

        # Apply transaction fee
        fee = abs(self.position) * price * self.transaction_fee
        pnl -= fee

        # Update balance
        self.balance += pnl
        self.total_pnl += pnl

        # Store position details before resetting
        last_position = {
            'position_size': self.position,
            'entry_price': self.entry_price,
            'exit_price': price,
            'pnl': pnl,
            'fee': fee
        }

        # Reset position
        self.position = 0.0
        self.entry_price = 0.0

        # Log position closure
        logger.info(f"Closed position - Size: {last_position['position_size']:.4f}, "
                    f"Entry: {last_position['entry_price']:.4f}, Exit: {last_position['exit_price']:.4f}, "
                    f"PnL: {last_position['pnl']:.4f}, Fee: {last_position['fee']:.4f}")

        return pnl, last_position

    def _get_observation(self):
        """
        Get the current observation.

        Returns:
            np.array: The observation vector
        """
        observations = []

        # Get data from each timeframe
        for tf in self.data_interface.timeframes:
            if tf in self.data:
                # Get the window of data for this timeframe
                df = self.data[tf]
                start_idx = self._align_timeframe_index(tf)

                if start_idx is not None and start_idx >= 0 and start_idx + self.window_size <= len(df):
                    window = df.iloc[start_idx:start_idx + self.window_size]

                    # Extract OHLCV data
                    ohlcv = window[['open', 'high', 'low', 'close', 'volume']].values

                    # Normalize OHLCV data
                    last_close = ohlcv[-1, 3]  # Last close price
                    ohlcv_normalized = np.zeros_like(ohlcv)
                    ohlcv_normalized[:, 0] = ohlcv[:, 0] / last_close - 1.0  # open
                    ohlcv_normalized[:, 1] = ohlcv[:, 1] / last_close - 1.0  # high
                    ohlcv_normalized[:, 2] = ohlcv[:, 2] / last_close - 1.0  # low
                    ohlcv_normalized[:, 3] = ohlcv[:, 3] / last_close - 1.0  # close

                    # Normalize volume (relative to moving average of volume)
                    if 'volume' in window.columns:
                        volume_ma = ohlcv[:, 4].mean()
                        if volume_ma > 0:
                            ohlcv_normalized[:, 4] = ohlcv[:, 4] / volume_ma - 1.0
                        else:
                            ohlcv_normalized[:, 4] = 0.0
                    else:
                        ohlcv_normalized[:, 4] = 0.0

                    # Flatten and add to observations
                    observations.append(ohlcv_normalized.flatten())
                else:
                    # Fill with zeros if not enough data
                    observations.append(np.zeros(self.window_size * 5))

        # Add position and balance information
        current_price = self.prices[self.current_step]
        position_info = np.array([
            self.position / self.max_position,  # Normalized position (-1 to 1)
            self.balance / self.initial_balance - 1.0,  # Normalized balance change
            self._calculate_unrealized_pnl(current_price)  # Unrealized PnL
        ])

        observations.append(position_info)

        # Concatenate all observations
        observation = np.concatenate(observations)
        return observation

    def _align_timeframe_index(self, timeframe):
        """
        Align the index of a higher timeframe with the current step in the primary timeframe.

        Args:
            timeframe: The timeframe to align

        Returns:
            int: The starting index in the higher timeframe
        """
        if timeframe == self.timeframe:
            return self.current_step - self.window_size

        # Get timestamps for current primary timeframe step
        primary_ts = self.timestamps[self.current_step]

        # Find closest index in the higher timeframe
        higher_ts = self.data[timeframe].index.values
        idx = np.searchsorted(higher_ts, primary_ts)

        # Adjust to get the starting index
        start_idx = max(0, idx - self.window_size)
        return start_idx

    def get_last_positions(self, n=5):
        """
        Get detailed information about the last n positions.

        Args:
            n: Number of last positions to return

        Returns:
            list: List of dictionaries containing position details
        """
        if not self.trades:
            return []

        # Filter trades to only include those that closed positions
        position_trades = [t for t in self.trades if t.get('realized_pnl', 0) != 0 or (t.get('prev_position', 0) != 0 and t.get('new_position', 0) == 0)]

        positions = []
        last_n_trades = position_trades[-n:] if len(position_trades) >= n else position_trades

        for trade in last_n_trades:
            position_info = {
                'timestamp': trade.get('timestamp', self.timestamps[trade['step']]),
                'action': trade.get('action_name', ['BUY', 'SELL', 'HOLD'][trade['action']]),
                'entry_price': trade.get('entry_price', 0.0),
                'exit_price': trade.get('exit_price', trade['price']),
                'position_size': trade.get('position_size', self.max_position),
                'realized_pnl': trade.get('realized_pnl', 0.0),
                'fee': trade.get('trade_fee', 0.0),
                'pnl': trade.get('pnl', 0.0),
                'pnl_percentage': (trade.get('pnl', 0.0) / self.initial_balance) * 100,
                'balance_before': trade.get('balance_before', 0.0),
                'balance_after': trade.get('balance_after', 0.0),
                'duration': trade.get('duration', 'N/A')
            }
            positions.append(position_info)

        return positions

    def render(self, mode='human'):
        """Render the environment"""
        current_step = self.current_step
        current_price = self.prices[current_step]

        # Display basic information
        print(f"\nTrading Environment Status:")
        print(f"============================")
        print(f"Step: {current_step}/{len(self.prices)-1}")
        print(f"Current Price: {current_price:.4f}")
        print(f"Current Balance: {self.balance:.4f}")
        print(f"Current Position: {self.position:.4f}")

        if self.position != 0:
            unrealized_pnl = self._calculate_unrealized_pnl(current_price)
            print(f"Entry Price: {self.entry_price:.4f}")
            print(f"Unrealized PnL: {unrealized_pnl:.4f} ({unrealized_pnl/self.balance*100:.2f}%)")

        print(f"Total PnL: {self.total_pnl:.4f} ({self.total_pnl/self.initial_balance*100:.2f}%)")
        print(f"Total Trades: {len(self.trades)}")

        if len(self.trades) > 0:
            win_trades = [t for t in self.trades if t.get('realized_pnl', 0) > 0]
            win_count = len(win_trades)
            # Count trades that closed positions (not just changed them)
            closed_positions = [t for t in self.trades if t.get('realized_pnl', 0) != 0]
            closed_count = len(closed_positions)
            win_rate = win_count / closed_count if closed_count > 0 else 0
            print(f"Positions Closed: {closed_count}")
            print(f"Winning Positions: {win_count}")
            print(f"Win Rate: {win_rate:.2f}")

        # Display last 5 positions
        print("\nLast 5 Positions:")
        print("================")
        last_positions = self.get_last_positions(5)

        if not last_positions:
            print("No closed positions yet.")

        for pos in last_positions:
            print(f"Time: {pos['timestamp']}")
            print(f"Action: {pos['action']}")
            print(f"Entry: {pos['entry_price']:.4f}, Exit: {pos['exit_price']:.4f}")
            print(f"Size: {pos['position_size']:.4f}")
            print(f"PnL: {pos['realized_pnl']:.4f} ({pos['pnl_percentage']:.2f}%)")
            print(f"Fee: {pos['fee']:.4f}")
            print(f"Balance: {pos['balance_before']:.4f} -> {pos['balance_after']:.4f}")
            print("----------------")

        return

    def close(self):
        """Close the environment"""
        pass
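For orientation, here is a minimal smoke-test sketch of the new `TradingEnvironment`. It is not part of the commit; the `FakeDataInterface` below is invented for illustration and only mimics the `timeframes`/`dataframes` attributes the environment reads.

```python
# Hypothetical smoke test for TradingEnvironment (illustrative only).
import numpy as np
import pandas as pd
from NN.environments.trading_env import TradingEnvironment

class FakeDataInterface:
    """Stand-in for the real DataInterface, filled with a random walk."""
    def __init__(self):
        self.timeframes = ['1m']
        idx = pd.date_range('2025-01-01', periods=500, freq='1min')
        close = 100 + np.cumsum(np.random.randn(500))
        self.dataframes = {'1m': pd.DataFrame({
            'open': close, 'high': close + 0.1, 'low': close - 0.1,
            'close': close, 'volume': np.random.rand(500) * 10,
        }, index=idx)}

env = TradingEnvironment(FakeDataInterface(), window_size=20)
obs = env.reset()
for _ in range(50):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        break
env.render()
```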
@@ -78,17 +78,25 @@ class CNNPyTorch(nn.Module):
         window_size, num_features = input_shape
         self.window_size = window_size
 
-        # Simpler architecture with fewer layers and dropout
+        # Increased complexity
         self.conv1 = nn.Sequential(
-            nn.Conv1d(num_features, 32, kernel_size=3, padding=1),
-            nn.BatchNorm1d(32),
+            nn.Conv1d(num_features, 64, kernel_size=3, padding=1),  # Increased filters
+            nn.BatchNorm1d(64),
             nn.ReLU(),
             nn.Dropout(0.2)
         )
 
         self.conv2 = nn.Sequential(
-            nn.Conv1d(32, 64, kernel_size=3, padding=1),
-            nn.BatchNorm1d(64),
+            nn.Conv1d(64, 128, kernel_size=3, padding=1),  # Increased filters
+            nn.BatchNorm1d(128),
+            nn.ReLU(),
+            nn.Dropout(0.2)
+        )
+
+        # Added third conv layer
+        self.conv3 = nn.Sequential(
+            nn.Conv1d(128, 128, kernel_size=3, padding=1),
+            nn.BatchNorm1d(128),
             nn.ReLU(),
             nn.Dropout(0.2)
         )
@@ -96,12 +104,12 @@ class CNNPyTorch(nn.Module):
         # Global average pooling to handle variable length sequences
         self.global_pool = nn.AdaptiveAvgPool1d(1)
 
-        # Fully connected layers
+        # Fully connected layers (updated input size and hidden size)
         self.fc = nn.Sequential(
-            nn.Linear(64, 32),
+            nn.Linear(128, 64),  # Updated input size from conv3, increased hidden size
             nn.ReLU(),
             nn.Dropout(0.2),
-            nn.Linear(32, output_size)
+            nn.Linear(64, output_size)
         )
 
     def forward(self, x):
@@ -120,10 +128,11 @@ class CNNPyTorch(nn.Module):
         # Convolutional layers
         x = self.conv1(x)
         x = self.conv2(x)
+        x = self.conv3(x)  # Added conv3 pass
 
         # Global pooling
         x = self.global_pool(x)
-        x = x.squeeze(-1)
+        x = x.squeeze(-1)  # Shape becomes [batch, 128]
 
         # Fully connected layers
         action_logits = self.fc(x)
@@ -216,6 +225,8 @@ class CNNModelPyTorch:
         self.last_actions = [[] for _ in range(num_pairs)]  # Track recent actions per pair
 
     def train_epoch(self, X_train, y_train, future_prices, batch_size):
+        # Add a call to predict_extrema here
+        self.predict_extrema(X_train)
         """Train the model for one epoch with focus on short-term pattern recognition"""
         self.model.train()
         total_loss = 0
@@ -321,7 +332,8 @@ class CNNModelPyTorch:
 
         return avg_loss, 0, accuracy  # Return 0 for price_loss as we're not using it
 
-    def predict(self, X):
+    def predict_extrema(self, X):
+        # Predict local extrema (lows and highs) based on input data
         """Make predictions optimized for short-term high-leverage trading signals"""
         self.model.eval()
 
@@ -54,6 +54,7 @@ class DQNAgent:
         self.epsilon = epsilon
         self.epsilon_min = epsilon_min
         self.epsilon_decay = epsilon_decay
+        self.epsilon_start = epsilon  # Store initial epsilon value for resets/bumps
         self.buffer_size = buffer_size
         self.batch_size = batch_size
         self.target_update = target_update
@@ -127,6 +128,28 @@ class DQNAgent:
         self.best_reward = -float('inf')
         self.no_improvement_count = 0
 
+        # Confidence tracking
+        self.confidence_history = []
+        self.avg_confidence = 0.0
+        self.max_confidence = 0.0
+        self.min_confidence = 1.0
+
+        # Trade action fee and confidence thresholds
+        self.trade_action_fee = 0.0005  # Small fee to discourage unnecessary trading
+        self.minimum_action_confidence = 0.5  # Minimum confidence to consider trading
+        self.recent_actions = []  # Track recent actions to avoid oscillations
+
+        # Violent move detection
+        self.price_history = []
+        self.volatility_window = 20  # Window size for volatility calculation
+        self.volatility_threshold = 0.0015  # Threshold for considering a move "violent"
+        self.post_violent_move = False  # Flag for recent violent move
+        self.violent_move_cooldown = 0  # Cooldown after violent move
+
+        # Feature integration
+        self.last_hidden_features = None  # Store last extracted features
+        self.feature_history = []  # Store history of features for analysis
+
         # Check if mixed precision training should be used
         self.use_mixed_precision = False
         if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and 'DISABLE_MIXED_PRECISION' not in os.environ:
@@ -146,6 +169,7 @@ class DQNAgent:
         self.timeframes = ["1m", "5m", "15m"][:self.state_dim[0]]  # Default timeframes
 
         logger.info(f"DQN Agent using device: {self.device}")
+        logger.info(f"Trade action fee set to {self.trade_action_fee}, minimum confidence: {self.minimum_action_confidence}")
 
     def move_models_to_device(self, device=None):
         """Move models to the specified device (GPU/CPU)"""
@@ -189,8 +213,20 @@ class DQNAgent:
             current_price = state[-1]  # Last feature
             next_price = next_state[-1]
 
-            # Calculate price change
-            price_change = (next_price - current_price) / current_price
+            # Calculate price change - avoid division by zero
+            if np.isscalar(current_price) and current_price != 0:
+                price_change = (next_price - current_price) / current_price
+            elif isinstance(current_price, np.ndarray):
+                # Handle array case - protect against division by zero
+                with np.errstate(divide='ignore', invalid='ignore'):
+                    price_change = (next_price - current_price) / current_price
+                # Replace infinities and NaNs with zeros
+                if isinstance(price_change, np.ndarray):
+                    price_change = np.nan_to_num(price_change, nan=0.0, posinf=0.0, neginf=0.0)
+                else:
+                    price_change = 0.0 if np.isnan(price_change) or np.isinf(price_change) else price_change
+            else:
+                price_change = 0.0
 
             # Check if this is a significant price movement
             if abs(price_change) > 0.002:  # Significant price change
@@ -264,9 +300,17 @@ class DQNAgent:
 
             # Get predictions using the policy network
             self.policy_net.eval()  # Set to evaluation mode for inference
-            action_probs, extrema_pred, price_predictions = self.policy_net(state_tensor)
+            action_probs, extrema_pred, price_predictions, hidden_features = self.policy_net(state_tensor)
             self.policy_net.train()  # Back to training mode
 
+            # Store hidden features for integration
+            self.last_hidden_features = hidden_features.cpu().numpy()
+
+            # Track feature history (limited size)
+            self.feature_history.append(hidden_features.cpu().numpy())
+            if len(self.feature_history) > 100:
+                self.feature_history = self.feature_history[-100:]
+
             # Get the predicted extrema class (0=bottom, 1=top, 2=neither)
             extrema_class = extrema_pred.argmax(dim=1).item()
             extrema_confidence = torch.softmax(extrema_pred, dim=1)[0, extrema_class].item()
@@ -336,17 +380,120 @@ class DQNAgent:
             # Get the action with highest Q-value
             action = action_probs.argmax().item()
 
+            # Calculate overall confidence in the action
+            q_values_softmax = F.softmax(action_probs, dim=1)[0]
+            action_confidence = q_values_softmax[action].item()
+
+            # Track confidence metrics
+            self.confidence_history.append(action_confidence)
+            if len(self.confidence_history) > 100:
+                self.confidence_history = self.confidence_history[-100:]
+
+            # Update confidence metrics
+            self.avg_confidence = sum(self.confidence_history) / len(self.confidence_history)
+            self.max_confidence = max(self.max_confidence, action_confidence)
+            self.min_confidence = min(self.min_confidence, action_confidence)
+
+            # Log average confidence occasionally
+            if random.random() < 0.01:  # 1% of the time
+                logger.info(f"Confidence metrics - Current: {action_confidence:.4f}, Avg: {self.avg_confidence:.4f}, " +
+                            f"Min: {self.min_confidence:.4f}, Max: {self.max_confidence:.4f}")
+
+            # Track price for violent move detection
+            try:
+                # Extract current price from state (assuming it's in the last position)
+                if len(state.shape) > 1:  # For 2D state
+                    current_price = state[-1, -1]
+                else:  # For 1D state
+                    current_price = state[-1]
+
+                self.price_history.append(current_price)
+                if len(self.price_history) > self.volatility_window:
+                    self.price_history = self.price_history[-self.volatility_window:]
+
+                # Detect violent price moves if we have enough price history
+                if len(self.price_history) >= 5:
+                    # Calculate short-term volatility
+                    recent_prices = self.price_history[-5:]
+
+                    # Make sure we're working with scalar values, not arrays
+                    if isinstance(recent_prices[0], np.ndarray):
+                        # If prices are arrays, extract the last value (current price)
+                        recent_prices = [p[-1] if isinstance(p, np.ndarray) and p.size > 0 else p for p in recent_prices]
+
+                    # Calculate price changes with protection against division by zero
+                    price_changes = []
+                    for i in range(1, len(recent_prices)):
+                        if recent_prices[i-1] != 0 and not np.isnan(recent_prices[i-1]) and not np.isnan(recent_prices[i]):
+                            change = (recent_prices[i] - recent_prices[i-1]) / recent_prices[i-1]
+                            price_changes.append(change)
+                        else:
+                            price_changes.append(0.0)
+
+                    # Calculate volatility as sum of absolute price changes
+                    volatility = sum([abs(change) for change in price_changes])
+
+                    # Check if we've had a violent move
+                    if volatility > self.volatility_threshold:
+                        logger.info(f"Violent price move detected! Volatility: {volatility:.6f}")
+                        self.post_violent_move = True
+                        self.violent_move_cooldown = 10  # Set cooldown period
+
+                    # Handle post-violent move period
+                    if self.post_violent_move:
+                        if self.violent_move_cooldown > 0:
+                            self.violent_move_cooldown -= 1
+                            # Increase confidence threshold temporarily after violent moves
+                            effective_threshold = self.minimum_action_confidence * 1.1
+                            logger.info(f"Post-violent move period: {self.violent_move_cooldown} steps remaining. " +
+                                        f"Using higher confidence threshold: {effective_threshold:.4f}")
+                        else:
+                            self.post_violent_move = False
+                            logger.info("Post-violent move period ended")
+            except Exception as e:
+                logger.warning(f"Error in violent move detection: {str(e)}")
+
+            # Apply trade action fee to buy/sell actions but not to hold
+            # This creates a threshold that must be exceeded to justify a trade
+            action_values = action_probs.clone()
+
+            # If BUY or SELL, apply fee by reducing the Q-value
+            if action == 0 or action == 1:  # BUY or SELL
+                # Check if confidence is above minimum threshold
+                effective_threshold = self.minimum_action_confidence
+                if self.post_violent_move:
+                    effective_threshold *= 1.1  # Higher threshold after violent moves
+
+                if action_confidence < effective_threshold:
+                    # If confidence is below threshold, force HOLD action
+                    logger.info(f"Action {action} confidence {action_confidence:.4f} below threshold {effective_threshold}, forcing HOLD")
+                    action = 2  # HOLD
+                else:
+                    # Apply trade action fee to ensure we only trade when there's clear benefit
+                    fee_adjusted_action_values = action_values.clone()
+                    fee_adjusted_action_values[0, 0] -= self.trade_action_fee  # Reduce BUY value
+                    fee_adjusted_action_values[0, 1] -= self.trade_action_fee  # Reduce SELL value
+                    # Hold value remains unchanged
+
+                    # Re-determine the action based on fee-adjusted values
+                    fee_adjusted_action = fee_adjusted_action_values.argmax().item()
+
+                    # If the fee changes our decision, log this
+                    if fee_adjusted_action != action:
+                        logger.info(f"Trade action fee changed decision from {action} to {fee_adjusted_action}")
+                        action = fee_adjusted_action
+
             # Adjust action based on extrema and price predictions
             # Prioritize short-term movement for trading decisions
             if immediate_conf > 0.8:  # Only adjust for strong signals
                 if immediate_direction == 2:  # UP prediction
                     # Bias toward BUY for strong up predictions
-                    if action != 0 and random.random() < 0.3 * immediate_conf:
+                    if action != 0 and action != 2 and random.random() < 0.3 * immediate_conf:
                         logger.info(f"Adjusting action to BUY based on immediate UP prediction")
                         action = 0  # BUY
                 elif immediate_direction == 0:  # DOWN prediction
                     # Bias toward SELL for strong down predictions
-                    if action != 1 and random.random() < 0.3 * immediate_conf:
+                    if action != 1 and action != 2 and random.random() < 0.3 * immediate_conf:
                         logger.info(f"Adjusting action to SELL based on immediate DOWN prediction")
                         action = 1  # SELL
 
@@ -354,333 +501,217 @@ class DQNAgent:
             if extrema_confidence > 0.8:  # Only adjust for strong signals
                 if extrema_class == 0:  # Bottom detected
                     # Bias toward BUY at bottoms
-                    if action != 0 and random.random() < 0.3 * extrema_confidence:
+                    if action != 0 and action != 2 and random.random() < 0.3 * extrema_confidence:
                         logger.info(f"Adjusting action to BUY based on bottom detection")
                         action = 0  # BUY
                 elif extrema_class == 1:  # Top detected
                     # Bias toward SELL at tops
-                    if action != 1 and random.random() < 0.3 * extrema_confidence:
+                    if action != 1 and action != 2 and random.random() < 0.3 * extrema_confidence:
                         logger.info(f"Adjusting action to SELL based on top detection")
                         action = 1  # SELL
 
+            # Finally, avoid action oscillation by checking recent history
+            if len(self.recent_actions) >= 2:
+                last_action = self.recent_actions[-1]
+                if action != last_action and action != 2 and last_action != 2:
+                    # We're switching between BUY and SELL too quickly
+                    # Only allow this if we have very high confidence
+                    if action_confidence < 0.85:
+                        logger.info(f"Preventing oscillation from {last_action} to {action}, forcing HOLD")
+                        action = 2  # HOLD
+
+            # Update recent actions list
+            self.recent_actions.append(action)
+            if len(self.recent_actions) > 5:
+                self.recent_actions = self.recent_actions[-5:]
+
             return action
 
-    def replay(self, use_prioritized=True) -> float:
-        """Experience replay - learn from stored experiences
+    def replay(self, experiences=None):
+        """Train the model using experiences from memory"""
 
-        Args:
-            use_prioritized: Whether to use prioritized experience replay
+        # Don't train if not in training mode
+        if not self.training:
 
-        Returns:
-            float: Training loss
-        """
-        # Check if we have enough samples
-        if len(self.memory) < self.batch_size:
             return 0.0
 
-        # Check if mixed precision should be disabled
-        if 'DISABLE_MIXED_PRECISION' in os.environ:
-            self.use_mixed_precision = False
+        # If no experiences provided, sample from memory
+        if experiences is None:
+            # Skip if memory is too small
+            if len(self.memory) < self.batch_size:
+                return 0.0
 
-        # Sample from memory with or without prioritization
-        if use_prioritized and len(self.positive_memory) > self.batch_size // 4:
-            # Use prioritized sampling: mix normal samples with positive reward samples
-            positive_batch_size = min(self.batch_size // 4, len(self.positive_memory))
-            regular_batch_size = self.batch_size - positive_batch_size
-
-            # Get positive examples
-            positive_batch = random.sample(self.positive_memory, positive_batch_size)
-
-            # Get regular examples
-            regular_batch = random.sample(self.memory, regular_batch_size)
-
-            # Combine batches
-            minibatch = positive_batch + regular_batch
-        else:
-            # Use regular uniform sampling
-            minibatch = random.sample(self.memory, self.batch_size)
-
-        # Extract batches with proper tensor conversion
-        states = np.vstack([self._normalize_state(x[0]) for x in minibatch])
-        actions = np.array([x[1] for x in minibatch])
-        rewards = np.array([x[2] for x in minibatch])
-        next_states = np.vstack([self._normalize_state(x[3]) for x in minibatch])
-        dones = np.array([x[4] for x in minibatch], dtype=np.float32)
-
-        # Convert to torch tensors and move to device
-        states_tensor = torch.FloatTensor(states).to(self.device)
-        actions_tensor = torch.LongTensor(actions).to(self.device)
-        rewards_tensor = torch.FloatTensor(rewards).to(self.device)
-        next_states_tensor = torch.FloatTensor(next_states).to(self.device)
-        dones_tensor = torch.FloatTensor(dones).to(self.device)
-
-        # First training step with mixed precision if available
+            # Sample random mini-batch from memory
+            indices = np.random.choice(len(self.memory), size=min(self.batch_size, len(self.memory)), replace=False)
+            experiences = [self.memory[i] for i in indices]
+
+        # Choose appropriate replay method
         if self.use_mixed_precision:
-            loss = self._replay_mixed_precision(
-                states_tensor, actions_tensor, rewards_tensor,
-                next_states_tensor, dones_tensor
-            )
+            # Convert experiences to tensors for mixed precision
+            states = torch.FloatTensor(np.array([e[0] for e in experiences])).to(self.device)
+            actions = torch.LongTensor(np.array([e[1] for e in experiences])).to(self.device)
+            rewards = torch.FloatTensor(np.array([e[2] for e in experiences])).to(self.device)
+            next_states = torch.FloatTensor(np.array([e[3] for e in experiences])).to(self.device)
+            dones = torch.FloatTensor(np.array([e[4] for e in experiences])).to(self.device)
+
+            # Use mixed precision replay
+            loss = self._replay_mixed_precision(states, actions, rewards, next_states, dones)
         else:
-            loss = self._replay_standard(
-                states_tensor, actions_tensor, rewards_tensor,
-                next_states_tensor, dones_tensor
-            )
+            # Pass experiences directly to standard replay method
+            loss = self._replay_standard(experiences)
 
-        # Training focus selector - randomly focus on one of the specialized training types
-        training_focus = random.random()
-
-        # Occasionally train specifically on extrema points
-        if training_focus < 0.3 and hasattr(self, 'extrema_memory') and len(self.extrema_memory) >= self.batch_size // 2:
-            # Sample from extrema memory
-            extrema_batch_size = min(self.batch_size // 2, len(self.extrema_memory))
-            extrema_batch = random.sample(self.extrema_memory, extrema_batch_size)
-
-            # Extract batches with proper tensor conversion
-            extrema_states = np.vstack([self._normalize_state(x[0]) for x in extrema_batch])
-            extrema_actions = np.array([x[1] for x in extrema_batch])
-            extrema_rewards = np.array([x[2] for x in extrema_batch])
-            extrema_next_states = np.vstack([self._normalize_state(x[3]) for x in extrema_batch])
-            extrema_dones = np.array([x[4] for x in extrema_batch], dtype=np.float32)
-
-            # Convert to torch tensors and move to device
-            extrema_states_tensor = torch.FloatTensor(extrema_states).to(self.device)
-            extrema_actions_tensor = torch.LongTensor(extrema_actions).to(self.device)
-            extrema_rewards_tensor = torch.FloatTensor(extrema_rewards).to(self.device)
-            extrema_next_states_tensor = torch.FloatTensor(extrema_next_states).to(self.device)
-            extrema_dones_tensor = torch.FloatTensor(extrema_dones).to(self.device)
-
-            # Additional training step focused on extrema points (with smaller learning rate)
-            original_lr = self.optimizer.param_groups[0]['lr']
-            # Temporarily reduce learning rate for fine-tuning on extrema
-            for param_group in self.optimizer.param_groups:
-                param_group['lr'] = original_lr * 0.5
-
-            # Train on extrema
-            if self.use_mixed_precision:
-                extrema_loss = self._replay_mixed_precision(
-                    extrema_states_tensor, extrema_actions_tensor, extrema_rewards_tensor,
-                    extrema_next_states_tensor, extrema_dones_tensor
-                )
-            else:
-                extrema_loss = self._replay_standard(
-                    extrema_states_tensor, extrema_actions_tensor, extrema_rewards_tensor,
-                    extrema_next_states_tensor, extrema_dones_tensor
-                )
-
-            # Restore original learning rate
-            for param_group in self.optimizer.param_groups:
-                param_group['lr'] = original_lr
-
-            logger.info(f"Extra training on extrema points: loss={extrema_loss:.4f}")
-
-            # Average the loss
-            loss = (loss + extrema_loss) / 2
-
-        # Occasionally train specifically on price movement data
-        elif training_focus >= 0.3 and training_focus < 0.6 and hasattr(self, 'price_movement_memory') and len(self.price_movement_memory) >= self.batch_size // 2:
-            # Sample from price movement memory
-            price_batch_size = min(self.batch_size // 2, len(self.price_movement_memory))
+        # Store loss for monitoring
|
|
||||||
price_batch = random.sample(self.price_movement_memory, price_batch_size)
|
|
||||||
|
|
||||||
# Extract batches with proper tensor conversion
|
|
||||||
price_states = np.vstack([self._normalize_state(x[0]) for x in price_batch])
|
|
||||||
price_actions = np.array([x[1] for x in price_batch])
|
|
||||||
price_rewards = np.array([x[2] for x in price_batch])
|
|
||||||
price_next_states = np.vstack([self._normalize_state(x[3]) for x in price_batch])
|
|
||||||
price_dones = np.array([x[4] for x in price_batch], dtype=np.float32)
|
|
||||||
|
|
||||||
# Convert to torch tensors and move to device
|
|
||||||
price_states_tensor = torch.FloatTensor(price_states).to(self.device)
|
|
||||||
price_actions_tensor = torch.LongTensor(price_actions).to(self.device)
|
|
||||||
price_rewards_tensor = torch.FloatTensor(price_rewards).to(self.device)
|
|
||||||
price_next_states_tensor = torch.FloatTensor(price_next_states).to(self.device)
|
|
||||||
price_dones_tensor = torch.FloatTensor(price_dones).to(self.device)
|
|
||||||
|
|
||||||
# Additional training step focused on price movements (with smaller learning rate)
|
|
||||||
original_lr = self.optimizer.param_groups[0]['lr']
|
|
||||||
# Temporarily reduce learning rate
|
|
||||||
for param_group in self.optimizer.param_groups:
|
|
||||||
param_group['lr'] = original_lr * 0.5
|
|
||||||
|
|
||||||
# Train on price movement data
|
|
||||||
if self.use_mixed_precision:
|
|
||||||
price_loss = self._replay_mixed_precision(
|
|
||||||
price_states_tensor, price_actions_tensor, price_rewards_tensor,
|
|
||||||
price_next_states_tensor, price_dones_tensor
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
price_loss = self._replay_standard(
|
|
||||||
price_states_tensor, price_actions_tensor, price_rewards_tensor,
|
|
||||||
price_next_states_tensor, price_dones_tensor
|
|
||||||
)
|
|
||||||
|
|
||||||
# Restore original learning rate
|
|
||||||
for param_group in self.optimizer.param_groups:
|
|
||||||
param_group['lr'] = original_lr
|
|
||||||
|
|
||||||
logger.info(f"Extra training on price movement data: loss={price_loss:.4f}")
|
|
||||||
|
|
||||||
# Average the loss
|
|
||||||
loss = (loss + price_loss) / 2
|
|
||||||
|
|
||||||
# Store and return loss
|
|
||||||
self.losses.append(loss)
|
self.losses.append(loss)
|
||||||
return loss
|
|
||||||
|
|
||||||
def _replay_standard(self, states, actions, rewards, next_states, dones):
|
|
||||||
"""Standard precision training step"""
|
|
||||||
# Zero gradients
|
|
||||||
self.optimizer.zero_grad()
|
|
||||||
|
|
||||||
# Get current Q values and extrema predictions
|
|
||||||
current_q_values, current_extrema_pred, current_price_pred = self.policy_net(states)
|
|
||||||
current_q_values = current_q_values.gather(1, actions.unsqueeze(1)).squeeze(1)
|
|
||||||
|
|
||||||
# Get next Q values from target network
|
|
||||||
with torch.no_grad():
|
|
||||||
next_q_values, next_extrema_pred, next_price_pred = self.target_net(next_states)
|
|
||||||
next_q_values = next_q_values.max(1)[0]
|
|
||||||
|
|
||||||
# Check for dimension mismatch and fix it
|
|
||||||
if rewards.shape[0] != next_q_values.shape[0]:
|
|
||||||
# Log the shape mismatch for debugging
|
|
||||||
logger.warning(f"Shape mismatch detected in standard replay: rewards {rewards.shape}, next_q_values {next_q_values.shape}")
|
|
||||||
# Use the smaller size to prevent index errors
|
|
||||||
min_size = min(rewards.shape[0], next_q_values.shape[0])
|
|
||||||
rewards = rewards[:min_size]
|
|
||||||
dones = dones[:min_size]
|
|
||||||
next_q_values = next_q_values[:min_size]
|
|
||||||
current_q_values = current_q_values[:min_size]
|
|
||||||
|
|
||||||
target_q_values = rewards + (1 - dones) * self.gamma * next_q_values
|
|
||||||
|
|
||||||
# Compute Q-value loss (primary task)
|
|
||||||
q_loss = nn.MSELoss()(current_q_values, target_q_values)
|
|
||||||
|
|
||||||
# Initialize combined loss with Q-value loss
|
|
||||||
loss = q_loss
|
|
||||||
|
|
||||||
# Try to extract price from current and next states
|
|
||||||
try:
|
|
||||||
# Extract price feature from sequence data (if available)
|
|
||||||
if len(states.shape) == 3: # [batch, seq, features]
|
|
||||||
current_prices = states[:, -1, -1] # Last timestep, last feature
|
|
||||||
next_prices = next_states[:, -1, -1]
|
|
||||||
else: # [batch, features]
|
|
||||||
current_prices = states[:, -1] # Last feature
|
|
||||||
next_prices = next_states[:, -1]
|
|
||||||
|
|
||||||
# Compute price changes for different timeframes
|
|
||||||
immediate_changes = (next_prices - current_prices) / current_prices
|
|
||||||
|
|
||||||
# Create price direction labels - simplified for training
|
|
||||||
# 0 = down, 1 = sideways, 2 = up
|
|
||||||
immediate_labels = torch.ones(min_size, dtype=torch.long, device=self.device) * 1 # Default: sideways
|
|
||||||
midterm_labels = torch.ones(min_size, dtype=torch.long, device=self.device) * 1
|
|
||||||
longterm_labels = torch.ones(min_size, dtype=torch.long, device=self.device) * 1
|
|
||||||
|
|
||||||
# Immediate term direction (1s, 1m)
|
|
||||||
immediate_up = (immediate_changes > 0.0005)
|
|
||||||
immediate_down = (immediate_changes < -0.0005)
|
|
||||||
immediate_labels[immediate_up] = 2 # Up
|
|
||||||
immediate_labels[immediate_down] = 0 # Down
|
|
||||||
|
|
||||||
# For mid and long term, we can only approximate during training
|
|
||||||
# In a real system, we'd need historical data to validate these
|
|
||||||
# Here we'll use the immediate term with increasing thresholds as approximation
|
|
||||||
|
|
||||||
# Mid-term (1h) - use slightly higher threshold
|
|
||||||
midterm_up = (immediate_changes > 0.001)
|
|
||||||
midterm_down = (immediate_changes < -0.001)
|
|
||||||
midterm_labels[midterm_up] = 2 # Up
|
|
||||||
midterm_labels[midterm_down] = 0 # Down
|
|
||||||
|
|
||||||
# Long-term (1d) - use even higher threshold
|
|
||||||
longterm_up = (immediate_changes > 0.002)
|
|
||||||
longterm_down = (immediate_changes < -0.002)
|
|
||||||
longterm_labels[longterm_up] = 2 # Up
|
|
||||||
longterm_labels[longterm_down] = 0 # Down
|
|
||||||
|
|
||||||
# Generate target values for price change regression
|
|
||||||
# For simplicity, we'll use the immediate change and scaled versions for longer timeframes
|
|
||||||
price_value_targets = torch.zeros((min_size, 4), device=self.device)
|
|
||||||
price_value_targets[:, 0] = immediate_changes
|
|
||||||
price_value_targets[:, 1] = immediate_changes * 2.0 # Approximate 1h change
|
|
||||||
price_value_targets[:, 2] = immediate_changes * 4.0 # Approximate 1d change
|
|
||||||
price_value_targets[:, 3] = immediate_changes * 6.0 # Approximate 1w change
|
|
||||||
|
|
||||||
# Calculate loss for price direction prediction (classification)
|
|
||||||
if len(current_price_pred['immediate'].shape) > 1 and current_price_pred['immediate'].shape[0] >= min_size:
|
|
||||||
# Slice predictions to match the adjusted batch size
|
|
||||||
immediate_pred = current_price_pred['immediate'][:min_size]
|
|
||||||
midterm_pred = current_price_pred['midterm'][:min_size]
|
|
||||||
longterm_pred = current_price_pred['longterm'][:min_size]
|
|
||||||
price_values_pred = current_price_pred['values'][:min_size]
|
|
||||||
|
|
||||||
# Compute losses for each task
|
|
||||||
immediate_loss = nn.CrossEntropyLoss()(immediate_pred, immediate_labels)
|
|
||||||
midterm_loss = nn.CrossEntropyLoss()(midterm_pred, midterm_labels)
|
|
||||||
longterm_loss = nn.CrossEntropyLoss()(longterm_pred, longterm_labels)
|
|
||||||
|
|
||||||
# MSE loss for price value regression
|
|
||||||
price_value_loss = nn.MSELoss()(price_values_pred, price_value_targets)
|
|
||||||
|
|
||||||
# Combine all price prediction losses
|
|
||||||
price_loss = immediate_loss + 0.7 * midterm_loss + 0.5 * longterm_loss + 0.3 * price_value_loss
|
|
||||||
|
|
||||||
# Create extrema labels (same as before)
|
|
||||||
extrema_labels = torch.ones(min_size, dtype=torch.long, device=self.device) * 2 # Default: neither
|
|
||||||
|
|
||||||
# Identify potential bottoms (significant negative change)
|
|
||||||
bottoms = (immediate_changes < -0.003)
|
|
||||||
extrema_labels[bottoms] = 0
|
|
||||||
|
|
||||||
# Identify potential tops (significant positive change)
|
|
||||||
tops = (immediate_changes > 0.003)
|
|
||||||
extrema_labels[tops] = 1
|
|
||||||
|
|
||||||
# Calculate extrema prediction loss
|
|
||||||
if len(current_extrema_pred.shape) > 1 and current_extrema_pred.shape[0] >= min_size:
|
|
||||||
current_extrema_pred = current_extrema_pred[:min_size]
|
|
||||||
extrema_loss = nn.CrossEntropyLoss()(current_extrema_pred, extrema_labels)
|
|
||||||
|
|
||||||
# Combined loss with all components
|
|
||||||
# Primary task: Q-value learning (RL objective)
|
|
||||||
# Secondary tasks: extrema detection and price prediction (supervised objectives)
|
|
||||||
loss = q_loss + 0.3 * extrema_loss + 0.3 * price_loss
|
|
||||||
|
|
||||||
# Log loss components occasionally
|
|
||||||
if random.random() < 0.01: # Log 1% of the time
|
|
||||||
logger.info(
|
|
||||||
f"Training losses: Q-loss={q_loss.item():.4f}, "
|
|
||||||
f"Extrema-loss={extrema_loss.item():.4f}, "
|
|
||||||
f"Price-loss={price_loss.item():.4f}, "
|
|
||||||
f"Imm-loss={immediate_loss.item():.4f}, "
|
|
||||||
f"Mid-loss={midterm_loss.item():.4f}, "
|
|
||||||
f"Long-loss={longterm_loss.item():.4f}"
|
|
||||||
)
|
|
||||||
except Exception as e:
|
|
||||||
# Fallback if price extraction fails
|
|
||||||
logger.warning(f"Failed to calculate price prediction loss: {str(e)}. Using only Q-value loss.")
|
|
||||||
# Just use Q-value loss
|
|
||||||
loss = q_loss
|
|
||||||
|
|
||||||
# Backward pass and optimize
|
|
||||||
loss.backward()
|
|
||||||
|
|
||||||
# Gradient clipping to prevent exploding gradients
|
|
||||||
torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
|
|
||||||
self.optimizer.step()
|
|
||||||
|
|
||||||
# Update target network if needed
|
|
||||||
self.update_count += 1
|
|
||||||
if self.update_count % self.target_update == 0:
|
|
||||||
self.target_net.load_state_dict(self.policy_net.state_dict())
|
|
||||||
|
|
||||||
# Track and decay epsilon
|
# Track and decay epsilon
|
||||||
self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)
|
self.epsilon = max(self.epsilon_min, self.epsilon * self.epsilon_decay)
|
||||||
|
|
||||||
return loss.item()
|
# Randomly decide if we should train on extrema points from special memory
|
||||||
|
if random.random() < 0.3 and len(self.extrema_memory) >= self.batch_size:
|
||||||
|
# Train specifically on extrema memory examples
|
||||||
|
extrema_indices = np.random.choice(len(self.extrema_memory), size=min(self.batch_size, len(self.extrema_memory)), replace=False)
|
||||||
|
extrema_batch = [self.extrema_memory[i] for i in extrema_indices]
|
||||||
|
|
||||||
|
# Extract tensors from extrema batch
|
||||||
|
extrema_states = torch.FloatTensor(np.array([e[0] for e in extrema_batch])).to(self.device)
|
||||||
|
extrema_actions = torch.LongTensor(np.array([e[1] for e in extrema_batch])).to(self.device)
|
||||||
|
extrema_rewards = torch.FloatTensor(np.array([e[2] for e in extrema_batch])).to(self.device)
|
||||||
|
extrema_next_states = torch.FloatTensor(np.array([e[3] for e in extrema_batch])).to(self.device)
|
||||||
|
extrema_dones = torch.FloatTensor(np.array([e[4] for e in extrema_batch])).to(self.device)
|
||||||
|
|
||||||
|
# Use a slightly reduced learning rate for extrema training
|
||||||
|
old_lr = self.optimizer.param_groups[0]['lr']
|
||||||
|
self.optimizer.param_groups[0]['lr'] = old_lr * 0.8
|
||||||
|
|
||||||
|
# Train on extrema memory
|
||||||
|
if self.use_mixed_precision:
|
||||||
|
extrema_loss = self._replay_mixed_precision(extrema_states, extrema_actions, extrema_rewards, extrema_next_states, extrema_dones)
|
||||||
|
else:
|
||||||
|
extrema_loss = self._replay_standard(extrema_batch)
|
||||||
|
|
||||||
|
# Reset learning rate
|
||||||
|
self.optimizer.param_groups[0]['lr'] = old_lr
|
||||||
|
|
||||||
|
# Log extrema loss
|
||||||
|
logger.info(f"Extra training on extrema points, loss: {extrema_loss:.4f}")
|
||||||
|
|
||||||
|
# Randomly train on price movement examples (similar to extrema)
|
||||||
|
if random.random() < 0.3 and len(self.price_movement_memory) >= self.batch_size:
|
||||||
|
# Train specifically on price movement memory examples
|
||||||
|
price_indices = np.random.choice(len(self.price_movement_memory), size=min(self.batch_size, len(self.price_movement_memory)), replace=False)
|
||||||
|
price_batch = [self.price_movement_memory[i] for i in price_indices]
|
||||||
|
|
||||||
|
# Extract tensors from price movement batch
|
||||||
|
price_states = torch.FloatTensor(np.array([e[0] for e in price_batch])).to(self.device)
|
||||||
|
price_actions = torch.LongTensor(np.array([e[1] for e in price_batch])).to(self.device)
|
||||||
|
price_rewards = torch.FloatTensor(np.array([e[2] for e in price_batch])).to(self.device)
|
||||||
|
price_next_states = torch.FloatTensor(np.array([e[3] for e in price_batch])).to(self.device)
|
||||||
|
price_dones = torch.FloatTensor(np.array([e[4] for e in price_batch])).to(self.device)
|
||||||
|
|
||||||
|
# Use a slightly reduced learning rate for price movement training
|
||||||
|
old_lr = self.optimizer.param_groups[0]['lr']
|
||||||
|
self.optimizer.param_groups[0]['lr'] = old_lr * 0.75
|
||||||
|
|
||||||
|
# Train on price movement memory
|
||||||
|
if self.use_mixed_precision:
|
||||||
|
price_loss = self._replay_mixed_precision(price_states, price_actions, price_rewards, price_next_states, price_dones)
|
||||||
|
else:
|
||||||
|
price_loss = self._replay_standard(price_batch)
|
||||||
|
|
||||||
|
# Reset learning rate
|
||||||
|
self.optimizer.param_groups[0]['lr'] = old_lr
|
||||||
|
|
||||||
|
# Log price movement loss
|
||||||
|
logger.info(f"Extra training on price movement examples, loss: {price_loss:.4f}")
|
||||||
|
|
||||||
|
return loss
|
||||||
|
|
||||||
|
def _replay_standard(self, experiences=None):
|
||||||
|
"""Standard training step without mixed precision"""
|
||||||
|
try:
|
||||||
|
# Use experiences if provided, otherwise sample from memory
|
||||||
|
if experiences is None:
|
||||||
|
# If memory is too small, skip training
|
||||||
|
if len(self.memory) < self.batch_size:
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
# Sample random mini-batch from memory
|
||||||
|
indices = np.random.choice(len(self.memory), size=min(self.batch_size, len(self.memory)), replace=False)
|
||||||
|
batch = [self.memory[i] for i in indices]
|
||||||
|
experiences = batch
|
||||||
|
|
||||||
|
# Unpack experiences
|
||||||
|
states, actions, rewards, next_states, dones = zip(*experiences)
|
||||||
|
|
||||||
|
# Convert to PyTorch tensors
|
||||||
|
states = torch.FloatTensor(np.array(states)).to(self.device)
|
||||||
|
actions = torch.LongTensor(np.array(actions)).to(self.device)
|
||||||
|
rewards = torch.FloatTensor(np.array(rewards)).to(self.device)
|
||||||
|
next_states = torch.FloatTensor(np.array(next_states)).to(self.device)
|
||||||
|
dones = torch.FloatTensor(np.array(dones)).to(self.device)
|
||||||
|
|
||||||
|
# Get current Q values
|
||||||
|
current_q_values, current_extrema_pred, current_price_pred, hidden_features = self.policy_net(states)
|
||||||
|
current_q_values = current_q_values.gather(1, actions.unsqueeze(1)).squeeze(1)
|
||||||
|
|
||||||
|
# Get next Q values with target network
|
||||||
|
with torch.no_grad():
|
||||||
|
next_q_values, next_extrema_pred, next_price_pred, next_hidden_features = self.target_net(next_states)
|
||||||
|
next_q_values = next_q_values.max(1)[0]
|
||||||
|
|
||||||
|
# Check for dimension mismatch between rewards and next_q_values
|
||||||
|
if rewards.shape[0] != next_q_values.shape[0]:
|
||||||
|
logger.warning(f"Shape mismatch detected in standard replay: rewards {rewards.shape}, next_q_values {next_q_values.shape}")
|
||||||
|
# Use the smaller size to prevent index error
|
||||||
|
min_size = min(rewards.shape[0], next_q_values.shape[0])
|
||||||
|
rewards = rewards[:min_size]
|
||||||
|
dones = dones[:min_size]
|
||||||
|
next_q_values = next_q_values[:min_size]
|
||||||
|
current_q_values = current_q_values[:min_size]
|
||||||
|
|
||||||
|
# Calculate target Q values
|
||||||
|
target_q_values = rewards + (1 - dones) * self.gamma * next_q_values
|
||||||
|
|
||||||
|
# Compute loss for Q value
|
||||||
|
q_loss = self.criterion(current_q_values, target_q_values)
|
||||||
|
|
||||||
|
# Try to compute extrema loss if possible
|
||||||
|
try:
|
||||||
|
# Get the target classes from extrema predictions
|
||||||
|
extrema_targets = torch.argmax(current_extrema_pred, dim=1).long()
|
||||||
|
|
||||||
|
# Compute extrema loss using cross-entropy - this is an auxiliary task
|
||||||
|
extrema_loss = F.cross_entropy(current_extrema_pred, extrema_targets)
|
||||||
|
|
||||||
|
# Combined loss with emphasis on Q-learning
|
||||||
|
total_loss = q_loss + 0.1 * extrema_loss
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Failed to calculate extrema loss: {str(e)}. Using only Q-value loss.")
|
||||||
|
total_loss = q_loss
|
||||||
|
|
||||||
|
# Reset gradients
|
||||||
|
self.optimizer.zero_grad()
|
||||||
|
|
||||||
|
# Backward pass
|
||||||
|
total_loss.backward()
|
||||||
|
|
||||||
|
# Clip gradients to avoid exploding gradients
|
||||||
|
torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
|
||||||
|
|
||||||
|
# Update weights
|
||||||
|
self.optimizer.step()
|
||||||
|
|
||||||
|
# Update target network if needed
|
||||||
|
self.update_count += 1
|
||||||
|
if self.update_count % self.target_update == 0:
|
||||||
|
self.target_net.load_state_dict(self.policy_net.state_dict())
|
||||||
|
|
||||||
|
# Return loss
|
||||||
|
return total_loss.item()
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error in replay standard: {str(e)}")
|
||||||
|
import traceback
|
||||||
|
logger.error(traceback.format_exc())
|
||||||
|
return 0.0
|
||||||
|
|
||||||
def _replay_mixed_precision(self, states, actions, rewards, next_states, dones):
|
def _replay_mixed_precision(self, states, actions, rewards, next_states, dones):
|
||||||
"""Mixed precision training step for better GPU performance"""
|
"""Mixed precision training step for better GPU performance"""
|
||||||
@ -696,12 +727,12 @@ class DQNAgent:
|
|||||||
# Forward pass with amp autocasting
|
# Forward pass with amp autocasting
|
||||||
with torch.cuda.amp.autocast():
|
with torch.cuda.amp.autocast():
|
||||||
# Get current Q values and extrema predictions
|
# Get current Q values and extrema predictions
|
||||||
current_q_values, current_extrema_pred, current_price_pred = self.policy_net(states)
|
current_q_values, current_extrema_pred, current_price_pred, hidden_features = self.policy_net(states)
|
||||||
current_q_values = current_q_values.gather(1, actions.unsqueeze(1)).squeeze(1)
|
current_q_values = current_q_values.gather(1, actions.unsqueeze(1)).squeeze(1)
|
||||||
|
|
||||||
# Get next Q values from target network
|
# Get next Q values from target network
|
||||||
with torch.no_grad():
|
with torch.no_grad():
|
||||||
next_q_values, next_extrema_pred, next_price_pred = self.target_net(next_states)
|
next_q_values, next_extrema_pred, next_price_pred, next_hidden_features = self.target_net(next_states)
|
||||||
next_q_values = next_q_values.max(1)[0]
|
next_q_values = next_q_values.max(1)[0]
|
||||||
|
|
||||||
# Check for dimension mismatch and fix it
|
# Check for dimension mismatch and fix it
|
||||||
@ -733,7 +764,7 @@ class DQNAgent:
|
|||||||
current_prices = states[:, -1] # Last feature
|
current_prices = states[:, -1] # Last feature
|
||||||
next_prices = next_states[:, -1]
|
next_prices = next_states[:, -1]
|
||||||
|
|
||||||
# Compute price changes for different timeframes
|
# Calculate price change for different timeframes
|
||||||
immediate_changes = (next_prices - current_prices) / current_prices
|
immediate_changes = (next_prices - current_prices) / current_prices
|
||||||
|
|
||||||
# Create price direction labels - simplified for training
|
# Create price direction labels - simplified for training
|
||||||
|
329
NN/models/dqn_agent_enhanced.py
Normal file
329
NN/models/dqn_agent_enhanced.py
Normal file
@ -0,0 +1,329 @@
|
|||||||
|
import torch
|
||||||
|
import torch.nn as nn
|
||||||
|
import torch.optim as optim
|
||||||
|
import numpy as np
|
||||||
|
from collections import deque
|
||||||
|
import random
|
||||||
|
from typing import Tuple, List
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import torch.nn.functional as F
|
||||||
|
|
||||||
|
# Add parent directory to path
|
||||||
|
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
||||||
|
|
||||||
|
# Import the EnhancedCNN model
|
||||||
|
from NN.models.enhanced_cnn import EnhancedCNN, ExampleSiftingDataset
|
||||||
|
|
||||||
|
# Configure logger
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class EnhancedDQNAgent:
|
||||||
|
"""
|
||||||
|
Enhanced Deep Q-Network agent for trading
|
||||||
|
Uses the improved EnhancedCNN model with residual connections and attention mechanisms
|
||||||
|
"""
|
||||||
|
def __init__(self,
|
||||||
|
state_shape: Tuple[int, ...],
|
||||||
|
n_actions: int,
|
||||||
|
learning_rate: float = 0.0003, # Slightly reduced learning rate for stability
|
||||||
|
gamma: float = 0.95, # Discount factor
|
||||||
|
epsilon: float = 1.0,
|
||||||
|
epsilon_min: float = 0.05,
|
||||||
|
epsilon_decay: float = 0.995, # Slower decay for more exploration
|
||||||
|
buffer_size: int = 50000, # Larger memory buffer
|
||||||
|
batch_size: int = 128, # Larger batch size
|
||||||
|
target_update: int = 10, # More frequent target updates
|
||||||
|
confidence_threshold: float = 0.4, # Lower confidence threshold
|
||||||
|
device=None):
|
||||||
|
|
||||||
|
# Extract state dimensions
|
||||||
|
if isinstance(state_shape, tuple) and len(state_shape) > 1:
|
||||||
|
# Multi-dimensional state (like image or sequence)
|
||||||
|
self.state_dim = state_shape
|
||||||
|
else:
|
||||||
|
# 1D state
|
||||||
|
if isinstance(state_shape, tuple):
|
||||||
|
self.state_dim = state_shape[0]
|
||||||
|
else:
|
||||||
|
self.state_dim = state_shape
|
||||||
|
|
||||||
|
# Store parameters
|
||||||
|
self.n_actions = n_actions
|
||||||
|
self.learning_rate = learning_rate
|
||||||
|
self.gamma = gamma
|
||||||
|
self.epsilon = epsilon
|
||||||
|
self.epsilon_min = epsilon_min
|
||||||
|
self.epsilon_decay = epsilon_decay
|
||||||
|
self.buffer_size = buffer_size
|
||||||
|
self.batch_size = batch_size
|
||||||
|
self.target_update = target_update
|
||||||
|
self.confidence_threshold = confidence_threshold
|
||||||
|
|
||||||
|
# Set device for computation
|
||||||
|
if device is None:
|
||||||
|
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||||
|
else:
|
||||||
|
self.device = device
|
||||||
|
|
||||||
|
# Initialize models with the enhanced CNN
|
||||||
|
self.policy_net = EnhancedCNN(self.state_dim, self.n_actions, self.confidence_threshold)
|
||||||
|
self.target_net = EnhancedCNN(self.state_dim, self.n_actions, self.confidence_threshold)
|
||||||
|
|
||||||
|
# Initialize the target network with the same weights as the policy network
|
||||||
|
self.target_net.load_state_dict(self.policy_net.state_dict())
|
||||||
|
|
||||||
|
# Set models to eval mode (important for batch norm, dropout)
|
||||||
|
self.target_net.eval()
|
||||||
|
|
||||||
|
# Optimization components
|
||||||
|
self.optimizer = optim.Adam(self.policy_net.parameters(), lr=self.learning_rate)
|
||||||
|
self.criterion = nn.MSELoss()
|
||||||
|
|
||||||
|
# Experience replay memory with example sifting
|
||||||
|
self.memory = ExampleSiftingDataset(max_examples=buffer_size)
|
||||||
|
self.update_count = 0
|
||||||
|
|
||||||
|
# Confidence tracking
|
||||||
|
self.confidence_history = []
|
||||||
|
self.avg_confidence = 0.0
|
||||||
|
self.max_confidence = 0.0
|
||||||
|
self.min_confidence = 1.0
|
||||||
|
|
||||||
|
# Performance tracking
|
||||||
|
self.losses = []
|
||||||
|
self.rewards = []
|
||||||
|
self.avg_reward = 0.0
|
||||||
|
|
||||||
|
# Check if mixed precision training should be used
|
||||||
|
self.use_mixed_precision = False
|
||||||
|
if torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and 'DISABLE_MIXED_PRECISION' not in os.environ:
|
||||||
|
self.use_mixed_precision = True
|
||||||
|
self.scaler = torch.cuda.amp.GradScaler()
|
||||||
|
logger.info("Mixed precision training enabled")
|
||||||
|
else:
|
||||||
|
logger.info("Mixed precision training disabled")
|
||||||
|
|
||||||
|
# For compatibility with old code
|
||||||
|
self.action_size = n_actions
|
||||||
|
|
||||||
|
logger.info(f"Enhanced DQN Agent using device: {self.device}")
|
||||||
|
logger.info(f"Confidence threshold set to {self.confidence_threshold}")
|
||||||
|
|
||||||
|
def move_models_to_device(self, device=None):
|
||||||
|
"""Move models to the specified device (GPU/CPU)"""
|
||||||
|
if device is not None:
|
||||||
|
self.device = device
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.policy_net = self.policy_net.to(self.device)
|
||||||
|
self.target_net = self.target_net.to(self.device)
|
||||||
|
logger.info(f"Moved models to {self.device}")
|
||||||
|
return True
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Failed to move models to {self.device}: {str(e)}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
def _normalize_state(self, state):
|
||||||
|
"""Normalize state for better training stability"""
|
||||||
|
try:
|
||||||
|
# Convert to numpy array if needed
|
||||||
|
if isinstance(state, list):
|
||||||
|
state = np.array(state, dtype=np.float32)
|
||||||
|
|
||||||
|
# Apply normalization based on state shape
|
||||||
|
if len(state.shape) > 1:
|
||||||
|
# Multi-dimensional state - normalize each feature dimension separately
|
||||||
|
for i in range(state.shape[0]):
|
||||||
|
# Skip if all zeros (to avoid division by zero)
|
||||||
|
if np.sum(np.abs(state[i])) > 0:
|
||||||
|
# Standardize each feature dimension
|
||||||
|
mean = np.mean(state[i])
|
||||||
|
std = np.std(state[i])
|
||||||
|
if std > 0:
|
||||||
|
state[i] = (state[i] - mean) / std
|
||||||
|
else:
|
||||||
|
# 1D state vector
|
||||||
|
# Skip if all zeros
|
||||||
|
if np.sum(np.abs(state)) > 0:
|
||||||
|
mean = np.mean(state)
|
||||||
|
std = np.std(state)
|
||||||
|
if std > 0:
|
||||||
|
state = (state - mean) / std
|
||||||
|
|
||||||
|
return state
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error normalizing state: {str(e)}")
|
||||||
|
return state
|
||||||
|
|
||||||
|
def remember(self, state, action, reward, next_state, done):
|
||||||
|
"""Store experience in memory with example sifting"""
|
||||||
|
self.memory.add_example(state, action, reward, next_state, done)
|
||||||
|
|
||||||
|
# Also track rewards for monitoring
|
||||||
|
self.rewards.append(reward)
|
||||||
|
if len(self.rewards) > 100:
|
||||||
|
self.rewards = self.rewards[-100:]
|
||||||
|
self.avg_reward = np.mean(self.rewards)
|
||||||
|
|
||||||
|
def act(self, state, explore=True):
|
||||||
|
"""Choose action using epsilon-greedy policy with built-in confidence thresholding"""
|
||||||
|
if explore and random.random() < self.epsilon:
|
||||||
|
return random.randrange(self.n_actions), 0.0 # Return action and zero confidence
|
||||||
|
|
||||||
|
# Normalize state before inference
|
||||||
|
normalized_state = self._normalize_state(state)
|
||||||
|
|
||||||
|
# Use the EnhancedCNN's act method which includes confidence thresholding
|
||||||
|
action, confidence = self.policy_net.act(normalized_state, explore=explore)
|
||||||
|
|
||||||
|
# Track confidence metrics
|
||||||
|
self.confidence_history.append(confidence)
|
||||||
|
if len(self.confidence_history) > 100:
|
||||||
|
self.confidence_history = self.confidence_history[-100:]
|
||||||
|
|
||||||
|
# Update confidence metrics
|
||||||
|
self.avg_confidence = sum(self.confidence_history) / len(self.confidence_history)
|
||||||
|
self.max_confidence = max(self.max_confidence, confidence)
|
||||||
|
self.min_confidence = min(self.min_confidence, confidence)
|
||||||
|
|
||||||
|
# Log average confidence occasionally
|
||||||
|
if random.random() < 0.01: # 1% of the time
|
||||||
|
logger.info(f"Confidence metrics - Current: {confidence:.4f}, Avg: {self.avg_confidence:.4f}, " +
|
||||||
|
f"Min: {self.min_confidence:.4f}, Max: {self.max_confidence:.4f}")
|
||||||
|
|
||||||
|
return action, confidence
|
||||||
|
|
||||||
|
def replay(self):
|
||||||
|
"""Train the model using experience replay with high-quality examples"""
|
||||||
|
# Check if enough samples in memory
|
||||||
|
if len(self.memory) < self.batch_size:
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
# Get batch of experiences
|
||||||
|
batch = self.memory.get_batch(self.batch_size)
|
||||||
|
if batch is None:
|
||||||
|
return 0.0
|
||||||
|
|
||||||
|
states = torch.FloatTensor(batch['states']).to(self.device)
|
||||||
|
actions = torch.LongTensor(batch['actions']).to(self.device)
|
||||||
|
rewards = torch.FloatTensor(batch['rewards']).to(self.device)
|
||||||
|
next_states = torch.FloatTensor(batch['next_states']).to(self.device)
|
||||||
|
dones = torch.FloatTensor(batch['dones']).to(self.device)
|
||||||
|
|
||||||
|
# Compute Q values
|
||||||
|
self.policy_net.train() # Set to training mode
|
||||||
|
|
||||||
|
# Get current Q values
|
||||||
|
if self.use_mixed_precision:
|
||||||
|
with torch.cuda.amp.autocast():
|
||||||
|
# Get current Q values
|
||||||
|
q_values, _, _, _ = self.policy_net(states)
|
||||||
|
current_q = q_values.gather(1, actions.unsqueeze(1)).squeeze(1)
|
||||||
|
|
||||||
|
# Compute target Q values
|
||||||
|
with torch.no_grad():
|
||||||
|
self.target_net.eval()
|
||||||
|
next_q_values, _, _, _ = self.target_net(next_states)
|
||||||
|
next_q = next_q_values.max(1)[0]
|
||||||
|
target_q = rewards + (1 - dones) * self.gamma * next_q
|
||||||
|
|
||||||
|
# Compute loss
|
||||||
|
loss = self.criterion(current_q, target_q)
|
||||||
|
|
||||||
|
# Perform backpropagation with mixed precision
|
||||||
|
self.optimizer.zero_grad()
|
||||||
|
self.scaler.scale(loss).backward()
|
||||||
|
self.scaler.unscale_(self.optimizer)
|
||||||
|
torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
|
||||||
|
self.scaler.step(self.optimizer)
|
||||||
|
self.scaler.update()
|
||||||
|
else:
|
||||||
|
# Standard precision training
|
||||||
|
# Get current Q values
|
||||||
|
q_values, _, _, _ = self.policy_net(states)
|
||||||
|
current_q = q_values.gather(1, actions.unsqueeze(1)).squeeze(1)
|
||||||
|
|
||||||
|
# Compute target Q values
|
||||||
|
with torch.no_grad():
|
||||||
|
self.target_net.eval()
|
||||||
|
next_q_values, _, _, _ = self.target_net(next_states)
|
||||||
|
next_q = next_q_values.max(1)[0]
|
||||||
|
target_q = rewards + (1 - dones) * self.gamma * next_q
|
||||||
|
|
||||||
|
# Compute loss
|
||||||
|
loss = self.criterion(current_q, target_q)
|
||||||
|
|
||||||
|
# Perform backpropagation
|
||||||
|
self.optimizer.zero_grad()
|
||||||
|
loss.backward()
|
||||||
|
torch.nn.utils.clip_grad_norm_(self.policy_net.parameters(), 1.0)
|
||||||
|
self.optimizer.step()
|
||||||
|
|
||||||
|
# Track loss
|
||||||
|
loss_value = loss.item()
|
||||||
|
self.losses.append(loss_value)
|
||||||
|
if len(self.losses) > 100:
|
||||||
|
self.losses = self.losses[-100:]
|
||||||
|
|
||||||
|
# Update target network
|
||||||
|
self.update_count += 1
|
||||||
|
if self.update_count % self.target_update == 0:
|
||||||
|
self.target_net.load_state_dict(self.policy_net.state_dict())
|
||||||
|
logger.info(f"Updated target network (step {self.update_count})")
|
||||||
|
|
||||||
|
# Decay epsilon
|
||||||
|
if self.epsilon > self.epsilon_min:
|
||||||
|
self.epsilon *= self.epsilon_decay
|
||||||
|
|
||||||
|
return loss_value
|
||||||
|
|
||||||
|
def save(self, path):
|
||||||
|
"""Save agent state and models"""
|
||||||
|
self.policy_net.save(f"{path}_policy")
|
||||||
|
self.target_net.save(f"{path}_target")
|
||||||
|
|
||||||
|
# Save agent state
|
||||||
|
torch.save({
|
||||||
|
'epsilon': self.epsilon,
|
||||||
|
'confidence_threshold': self.confidence_threshold,
|
||||||
|
'losses': self.losses,
|
||||||
|
'rewards': self.rewards,
|
||||||
|
'avg_reward': self.avg_reward,
|
||||||
|
'confidence_history': self.confidence_history,
|
||||||
|
'avg_confidence': self.avg_confidence,
|
||||||
|
'max_confidence': self.max_confidence,
|
||||||
|
'min_confidence': self.min_confidence,
|
||||||
|
'update_count': self.update_count
|
||||||
|
}, f"{path}_agent_state.pt")
|
||||||
|
|
||||||
|
logger.info(f"Agent state saved to {path}_agent_state.pt")
|
||||||
|
|
||||||
|
def load(self, path):
|
||||||
|
"""Load agent state and models"""
|
||||||
|
policy_loaded = self.policy_net.load(f"{path}_policy")
|
||||||
|
target_loaded = self.target_net.load(f"{path}_target")
|
||||||
|
|
||||||
|
# Load agent state if available
|
||||||
|
agent_state_path = f"{path}_agent_state.pt"
|
||||||
|
if os.path.exists(agent_state_path):
|
||||||
|
try:
|
||||||
|
state = torch.load(agent_state_path)
|
||||||
|
self.epsilon = state.get('epsilon', self.epsilon)
|
||||||
|
self.confidence_threshold = state.get('confidence_threshold', self.confidence_threshold)
|
||||||
|
self.policy_net.confidence_threshold = self.confidence_threshold
|
||||||
|
self.target_net.confidence_threshold = self.confidence_threshold
|
||||||
|
self.losses = state.get('losses', [])
|
||||||
|
self.rewards = state.get('rewards', [])
|
||||||
|
self.avg_reward = state.get('avg_reward', 0.0)
|
||||||
|
self.confidence_history = state.get('confidence_history', [])
|
||||||
|
self.avg_confidence = state.get('avg_confidence', 0.0)
|
||||||
|
self.max_confidence = state.get('max_confidence', 0.0)
|
||||||
|
self.min_confidence = state.get('min_confidence', 1.0)
|
||||||
|
self.update_count = state.get('update_count', 0)
|
||||||
|
logger.info(f"Agent state loaded from {agent_state_path}")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error loading agent state: {str(e)}")
|
||||||
|
|
||||||
|
return policy_loaded and target_loaded
|
413
NN/models/enhanced_cnn.py
Normal file
413
NN/models/enhanced_cnn.py
Normal file
@ -0,0 +1,413 @@
|
|||||||
|
import torch
|
||||||
|
import torch.nn as nn
|
||||||
|
import torch.optim as optim
|
||||||
|
import numpy as np
|
||||||
|
import os
|
||||||
|
import logging
|
||||||
|
import torch.nn.functional as F
|
||||||
|
from typing import List, Tuple, Dict, Any, Optional, Union
|
||||||
|
|
||||||
|
# Configure logger
|
||||||
|
logging.basicConfig(level=logging.INFO)
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
class ResidualBlock(nn.Module):
|
||||||
|
"""
|
||||||
|
Residual block with pre-activation (BatchNorm -> ReLU -> Conv)
|
||||||
|
"""
|
||||||
|
def __init__(self, in_channels, out_channels, stride=1):
|
||||||
|
super(ResidualBlock, self).__init__()
|
||||||
|
self.bn1 = nn.BatchNorm1d(in_channels)
|
||||||
|
self.conv1 = nn.Conv1d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
|
||||||
|
self.bn2 = nn.BatchNorm1d(out_channels)
|
||||||
|
self.conv2 = nn.Conv1d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
|
||||||
|
|
||||||
|
# Shortcut connection to match dimensions
|
||||||
|
self.shortcut = nn.Sequential()
|
||||||
|
if stride != 1 or in_channels != out_channels:
|
||||||
|
self.shortcut = nn.Sequential(
|
||||||
|
nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
|
||||||
|
)
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
out = F.relu(self.bn1(x))
|
||||||
|
shortcut = self.shortcut(out)
|
||||||
|
out = self.conv1(out)
|
||||||
|
out = self.conv2(F.relu(self.bn2(out)))
|
||||||
|
out += shortcut
|
||||||
|
return out
|
||||||
|
|
||||||
|
class SelfAttention(nn.Module):
|
||||||
|
"""
|
||||||
|
Self-attention mechanism for sequential data
|
||||||
|
"""
|
||||||
|
def __init__(self, dim):
|
||||||
|
super(SelfAttention, self).__init__()
|
||||||
|
self.query = nn.Linear(dim, dim)
|
||||||
|
self.key = nn.Linear(dim, dim)
|
||||||
|
self.value = nn.Linear(dim, dim)
|
||||||
|
self.scale = torch.sqrt(torch.tensor(dim, dtype=torch.float32))
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
# x shape: [batch_size, seq_len, dim]
|
||||||
|
batch_size, seq_len, dim = x.size()
|
||||||
|
|
||||||
|
q = self.query(x) # [batch_size, seq_len, dim]
|
||||||
|
k = self.key(x) # [batch_size, seq_len, dim]
|
||||||
|
v = self.value(x) # [batch_size, seq_len, dim]
|
||||||
|
|
||||||
|
# Calculate attention scores
|
||||||
|
scores = torch.matmul(q, k.transpose(-2, -1)) / self.scale # [batch_size, seq_len, seq_len]
|
||||||
|
|
||||||
|
# Apply softmax to get attention weights
|
||||||
|
attention = F.softmax(scores, dim=-1) # [batch_size, seq_len, seq_len]
|
||||||
|
|
||||||
|
# Apply attention to values
|
||||||
|
out = torch.matmul(attention, v) # [batch_size, seq_len, dim]
|
||||||
|
|
||||||
|
return out, attention
|
||||||
|
|
||||||
|
class EnhancedCNN(nn.Module):
|
||||||
|
"""
|
||||||
|
Enhanced CNN model with residual connections and attention mechanisms
|
||||||
|
for improved trading decision making
|
||||||
|
"""
|
||||||
|
def __init__(self, input_shape, n_actions, confidence_threshold=0.5):
|
||||||
|
super(EnhancedCNN, self).__init__()
|
||||||
|
|
||||||
|
# Store dimensions
|
||||||
|
self.input_shape = input_shape
|
||||||
|
self.n_actions = n_actions
|
||||||
|
self.confidence_threshold = confidence_threshold
|
||||||
|
|
||||||
|
# Calculate input dimensions
|
||||||
|
if isinstance(input_shape, (list, tuple)):
|
||||||
|
if len(input_shape) == 3: # [channels, height, width]
|
||||||
|
self.channels, self.height, self.width = input_shape
|
||||||
|
self.feature_dim = self.height * self.width
|
||||||
|
elif len(input_shape) == 2: # [timeframes, features]
|
||||||
|
self.channels = input_shape[0]
|
||||||
|
self.features = input_shape[1]
|
||||||
|
self.feature_dim = self.features * self.channels
|
||||||
|
elif len(input_shape) == 1: # [features]
|
||||||
|
self.channels = 1
|
||||||
|
self.features = input_shape[0]
|
||||||
|
self.feature_dim = self.features
|
||||||
|
else:
|
||||||
|
raise ValueError(f"Unsupported input shape: {input_shape}")
|
||||||
|
else: # single integer
|
||||||
|
self.channels = 1
|
||||||
|
self.features = input_shape
|
||||||
|
self.feature_dim = input_shape
|
||||||
|
|
||||||
|
# Build network
|
||||||
|
self._build_network()
|
||||||
|
|
||||||
|
# Initialize device
|
||||||
|
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||||
|
self.to(self.device)
|
||||||
|
|
||||||
|
logger.info(f"EnhancedCNN initialized with input shape: {input_shape}, actions: {n_actions}")
|
||||||
|
|
||||||
|
def _build_network(self):
|
||||||
|
"""Build the enhanced neural network with current feature dimensions"""
|
||||||
|
|
||||||
|
# 1D CNN for sequential data
|
||||||
|
if self.channels > 1:
|
||||||
|
# Reshape expected: [batch, timeframes, features]
|
||||||
|
self.conv_layers = nn.Sequential(
|
||||||
|
nn.Conv1d(self.channels, 64, kernel_size=3, padding=1),
|
||||||
|
nn.BatchNorm1d(64),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Dropout(0.2),
|
||||||
|
|
||||||
|
ResidualBlock(64, 128),
|
||||||
|
nn.MaxPool1d(kernel_size=2, stride=2),
|
||||||
|
nn.Dropout(0.3),
|
||||||
|
|
||||||
|
ResidualBlock(128, 256),
|
||||||
|
nn.MaxPool1d(kernel_size=2, stride=2),
|
||||||
|
nn.Dropout(0.4),
|
||||||
|
|
||||||
|
ResidualBlock(256, 512),
|
||||||
|
nn.AdaptiveAvgPool1d(1) # Global average pooling
|
||||||
|
)
|
||||||
|
# Feature dimension after conv layers
|
||||||
|
self.conv_features = 512
|
||||||
|
else:
|
||||||
|
# For 1D vectors, skip the convolutional part
|
||||||
|
self.conv_layers = None
|
||||||
|
self.conv_features = 0
|
||||||
|
|
||||||
|
# Fully connected layers for all cases
|
||||||
|
# We'll use deeper layers with skip connections
|
||||||
|
if self.conv_layers is None:
|
||||||
|
# For 1D inputs without conv preprocessing
|
||||||
|
self.fc1 = nn.Linear(self.feature_dim, 512)
|
||||||
|
self.features_dim = 512
|
||||||
|
else:
|
||||||
|
# For data processed by conv layers
|
||||||
|
self.fc1 = nn.Linear(self.conv_features, 512)
|
||||||
|
self.features_dim = 512
|
||||||
|
|
||||||
|
# Common feature extraction layers
|
||||||
|
self.fc_layers = nn.Sequential(
|
||||||
|
self.fc1,
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Dropout(0.4),
|
||||||
|
nn.Linear(512, 512),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Dropout(0.4),
|
||||||
|
nn.Linear(512, 256),
|
||||||
|
nn.ReLU()
|
||||||
|
)
|
||||||
|
|
||||||
|
# Dueling architecture
|
||||||
|
self.advantage_stream = nn.Sequential(
|
||||||
|
nn.Linear(256, 128),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Linear(128, self.n_actions)
|
||||||
|
)
|
||||||
|
|
||||||
|
self.value_stream = nn.Sequential(
|
||||||
|
nn.Linear(256, 128),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Linear(128, 1)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Extrema detection head with increased capacity
|
||||||
|
self.extrema_head = nn.Sequential(
|
||||||
|
nn.Linear(256, 128),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Dropout(0.3),
|
||||||
|
nn.Linear(128, 3) # 0=bottom, 1=top, 2=neither
|
||||||
|
)
|
||||||
|
|
||||||
|
# Price prediction heads with increased capacity
|
||||||
|
self.price_pred_immediate = nn.Sequential(
|
||||||
|
nn.Linear(256, 64),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Linear(64, 3) # Up, Down, Sideways
|
||||||
|
)
|
||||||
|
|
||||||
|
self.price_pred_midterm = nn.Sequential(
|
||||||
|
nn.Linear(256, 64),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Linear(64, 3) # Up, Down, Sideways
|
||||||
|
)
|
||||||
|
|
||||||
|
self.price_pred_longterm = nn.Sequential(
|
||||||
|
nn.Linear(256, 64),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Linear(64, 3) # Up, Down, Sideways
|
||||||
|
)
|
||||||
|
|
||||||
|
# Value prediction with increased capacity
|
||||||
|
self.price_pred_value = nn.Sequential(
|
||||||
|
nn.Linear(256, 128),
|
||||||
|
nn.ReLU(),
|
||||||
|
nn.Dropout(0.3),
|
||||||
|
nn.Linear(128, 4) # % change for different timeframes
|
||||||
|
)
|
||||||
|
|
||||||
|
# Additional attention layer for feature refinement
|
||||||
|
self.attention = SelfAttention(256)
|
||||||
|
|
||||||
|
def _check_rebuild_network(self, features):
|
||||||
|
"""Check if network needs to be rebuilt for different feature dimensions"""
|
||||||
|
if features != self.feature_dim:
|
||||||
|
logger.info(f"Rebuilding network for new feature dimension: {features} (was {self.feature_dim})")
|
||||||
|
self.feature_dim = features
|
||||||
|
self._build_network()
|
||||||
|
# Move to device after rebuilding
|
||||||
|
self.to(self.device)
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def forward(self, x):
|
||||||
|
"""Forward pass through the network"""
|
||||||
|
batch_size = x.size(0)
|
||||||
|
|
||||||
|
# Process different input shapes
|
||||||
|
if len(x.shape) > 2:
|
||||||
|
# Handle 3D input [batch, timeframes, features]
|
||||||
|
if self.conv_layers is not None:
|
||||||
|
# Reshape for 1D convolution:
|
||||||
|
# [batch, timeframes, features] -> [batch, timeframes, features*1]
|
||||||
|
if len(x.shape) == 3:
|
||||||
|
x = x.permute(0, 1, 2) # Ensure shape is [batch, timeframes, features]
|
||||||
|
x_reshaped = x.permute(0, 1, 2) # [batch, timeframes, features]
|
||||||
|
|
||||||
|
# Check if the feature dimension has changed and rebuild if necessary
|
||||||
|
if x_reshaped.size(1) * x_reshaped.size(2) != self.feature_dim:
|
||||||
|
total_features = x_reshaped.size(1) * x_reshaped.size(2)
|
||||||
|
self._check_rebuild_network(total_features)
|
||||||
|
|
||||||
|
# Apply convolutions
|
||||||
|
x_conv = self.conv_layers(x_reshaped)
|
||||||
|
# Flatten: [batch, channels, 1] -> [batch, channels]
|
||||||
|
x_flat = x_conv.view(batch_size, -1)
|
||||||
|
else:
|
||||||
|
# If no conv layers, just flatten
|
||||||
|
x_flat = x.view(batch_size, -1)
|
||||||
|
else:
|
||||||
|
# For 2D input [batch, features]
|
||||||
|
x_flat = x
|
||||||
|
|
||||||
|
# Check if dimensions have changed
|
||||||
|
if x_flat.size(1) != self.feature_dim:
|
||||||
|
self._check_rebuild_network(x_flat.size(1))
|
||||||
|
|
||||||
|
# Apply FC layers
|
||||||
|
features = self.fc_layers(x_flat)
|
||||||
|
|
||||||
|
# Add attention for feature refinement
|
||||||
|
features_3d = features.unsqueeze(1) # [batch, 1, features]
|
||||||
|
features_attended, _ = self.attention(features_3d)
|
||||||
|
features_refined = features_attended.squeeze(1) # [batch, features]
|
||||||
|
|
||||||
|
# Calculate advantage and value
|
||||||
|
advantage = self.advantage_stream(features_refined)
|
||||||
|
value = self.value_stream(features_refined)
|
||||||
|
|
||||||
|
# Combine for Q-values (Dueling architecture)
|
||||||
|
q_values = value + advantage - advantage.mean(dim=1, keepdim=True)
|
||||||
|
|
||||||
|
# Get extrema predictions
|
||||||
|
extrema_pred = self.extrema_head(features_refined)
|
||||||
|
|
||||||
|
# Price movement predictions
|
||||||
|
price_immediate = self.price_pred_immediate(features_refined)
|
||||||
|
price_midterm = self.price_pred_midterm(features_refined)
|
||||||
|
price_longterm = self.price_pred_longterm(features_refined)
|
||||||
|
price_values = self.price_pred_value(features_refined)
|
||||||
|
|
||||||
|
# Package price predictions
|
||||||
|
price_predictions = {
|
||||||
|
'immediate': price_immediate,
|
||||||
|
'midterm': price_midterm,
|
||||||
|
'longterm': price_longterm,
|
||||||
|
'values': price_values
|
||||||
|
}
|
||||||
|
|
||||||
|
return q_values, extrema_pred, price_predictions, features_refined
|
||||||
|
|
||||||
|
def act(self, state, explore=True):
|
||||||
|
"""
|
||||||
|
Choose action based on state with confidence thresholding
|
||||||
|
"""
|
||||||
|
state_tensor = torch.FloatTensor(state).unsqueeze(0).to(self.device)
|
||||||
|
|
||||||
|
with torch.no_grad():
|
||||||
|
q_values, _, _, _ = self(state_tensor)
|
||||||
|
|
||||||
|
# Apply softmax to get action probabilities
|
||||||
|
action_probs = F.softmax(q_values, dim=1)
|
||||||
|
|
||||||
|
# Get action with highest probability
|
||||||
|
action = action_probs.argmax(dim=1).item()
|
||||||
|
action_confidence = action_probs[0, action].item()
|
||||||
|
|
||||||
|
# Check if confidence exceeds threshold
|
||||||
|
if action_confidence < self.confidence_threshold:
|
||||||
|
# Force HOLD action (typically action 2)
|
||||||
|
action = 2 # Assume 2 is HOLD
|
||||||
|
logger.info(f"Action {action} confidence {action_confidence:.4f} below threshold {self.confidence_threshold}, forcing HOLD")
|
||||||
|
|
||||||
|
return action, action_confidence
|
||||||
|
|
||||||
|
def save(self, path):
|
||||||
|
"""Save model weights and architecture"""
|
||||||
|
os.makedirs(os.path.dirname(path), exist_ok=True)
|
||||||
|
torch.save({
|
||||||
|
'state_dict': self.state_dict(),
|
||||||
|
'input_shape': self.input_shape,
|
||||||
|
'n_actions': self.n_actions,
|
||||||
|
'feature_dim': self.feature_dim,
|
||||||
|
'confidence_threshold': self.confidence_threshold
|
||||||
|
}, f"{path}.pt")
|
||||||
|
logger.info(f"Enhanced CNN model saved to {path}.pt")
|
||||||
|
|
||||||
|
def load(self, path):
|
||||||
|
"""Load model weights and architecture"""
|
||||||
|
try:
|
||||||
|
checkpoint = torch.load(f"{path}.pt", map_location=self.device)
|
||||||
|
self.input_shape = checkpoint['input_shape']
|
||||||
|
self.n_actions = checkpoint['n_actions']
|
||||||
|
self.feature_dim = checkpoint['feature_dim']
|
||||||
|
if 'confidence_threshold' in checkpoint:
|
||||||
|
self.confidence_threshold = checkpoint['confidence_threshold']
|
||||||
|
self._build_network()
|
||||||
|
self.load_state_dict(checkpoint['state_dict'])
|
||||||
|
self.to(self.device)
|
||||||
|
logger.info(f"Enhanced CNN model loaded from {path}.pt")
|
||||||
|
return True
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error loading model: {str(e)}")
|
||||||
|
return False
|
||||||
|
|
||||||
|
# Additional utility for example sifting
|
||||||
|
class ExampleSiftingDataset:
|
||||||
|
"""
|
||||||
|
Dataset that selectively keeps high-quality examples for training
|
||||||
|
to improve model performance
|
||||||
|
"""
|
||||||
|
def __init__(self, max_examples=50000):
|
||||||
|
self.examples = []
|
||||||
|
self.labels = []
|
||||||
|
self.rewards = []
|
||||||
|
self.max_examples = max_examples
|
||||||
|
self.min_reward_threshold = -0.05 # Minimum reward to keep an example
|
||||||
|
|
||||||
|
def add_example(self, state, action, reward, next_state, done):
|
||||||
|
"""Add a new training example with reward-based filtering"""
|
||||||
|
# Only keep examples with rewards above the threshold
|
||||||
|
if reward > self.min_reward_threshold:
|
||||||
|
self.examples.append((state, action, reward, next_state, done))
|
||||||
|
self.rewards.append(reward)
|
||||||
|
|
||||||
|
# Sort by reward and keep only the top examples
|
||||||
|
if len(self.examples) > self.max_examples:
|
||||||
|
# Sort by reward (highest first)
|
||||||
|
sorted_indices = np.argsort(self.rewards)[::-1]
|
||||||
|
# Keep top examples
|
||||||
|
self.examples = [self.examples[i] for i in sorted_indices[:self.max_examples]]
|
||||||
|
self.rewards = [self.rewards[i] for i in sorted_indices[:self.max_examples]]
|
||||||
|
|
||||||
|
# Update the minimum reward threshold to be the minimum in our kept examples
|
||||||
|
self.min_reward_threshold = min(self.rewards)
|
||||||
|
|
||||||
|
def get_batch(self, batch_size):
|
||||||
|
"""Get a batch of examples, prioritizing better examples"""
|
||||||
|
if not self.examples:
|
||||||
|
return None
|
||||||
|
|
||||||
|
# Calculate selection probabilities based on rewards
|
||||||
|
rewards = np.array(self.rewards)
|
||||||
|
# Shift rewards to be positive for probability calculation
|
||||||
|
min_reward = min(rewards)
|
||||||
|
shifted_rewards = rewards - min_reward + 0.1 # Add small constant
|
||||||
|
probs = shifted_rewards / shifted_rewards.sum()
|
||||||
|
|
||||||
|
# Sample batch indices with reward-based probabilities
|
||||||
|
indices = np.random.choice(
|
||||||
|
len(self.examples),
|
||||||
|
size=min(batch_size, len(self.examples)),
|
||||||
|
p=probs,
|
||||||
|
replace=False
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create batch
|
||||||
|
batch = [self.examples[i] for i in indices]
|
||||||
|
states, actions, rewards, next_states, dones = zip(*batch)
|
||||||
|
|
||||||
|
return {
|
||||||
|
'states': np.array(states),
|
||||||
|
'actions': np.array(actions),
|
||||||
|
'rewards': np.array(rewards),
|
||||||
|
'next_states': np.array(next_states),
|
||||||
|
'dones': np.array(dones)
|
||||||
|
}
|
||||||
|
|
||||||
|
def __len__(self):
|
||||||
|
return len(self.examples)
|
1
NN/models/saved/dqn_agent_best_metadata.json
Normal file
1
NN/models/saved/dqn_agent_best_metadata.json
Normal file
@ -0,0 +1 @@
|
|||||||
|
{"best_reward": 4791516.572471984, "best_episode": 3250, "best_pnl": 826842167451289.1, "best_win_rate": 0.47368421052631576, "date": "2025-04-01 10:19:16"}
|
20
NN/models/saved/hybrid_stats_latest.json
Normal file
20
NN/models/saved/hybrid_stats_latest.json
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
{
|
||||||
|
"supervised": {
|
||||||
|
"epochs_completed": 22650,
|
||||||
|
"best_val_pnl": 0.0,
|
||||||
|
"best_epoch": 50,
|
||||||
|
"best_win_rate": 0
|
||||||
|
},
|
||||||
|
"reinforcement": {
|
||||||
|
"episodes_completed": 0,
|
||||||
|
"best_reward": -Infinity,
|
||||||
|
"best_episode": 0,
|
||||||
|
"best_win_rate": 0
|
||||||
|
},
|
||||||
|
"hybrid": {
|
||||||
|
"iterations_completed": 453,
|
||||||
|
"best_combined_score": 0.0,
|
||||||
|
"training_started": "2025-04-09T10:30:42.510856",
|
||||||
|
"last_update": "2025-04-09T10:40:02.217840"
|
||||||
|
}
|
||||||
|
}
|
326
NN/models/saved/realtime_ticks_training_stats.json
Normal file
326
NN/models/saved/realtime_ticks_training_stats.json
Normal file
@ -0,0 +1,326 @@
{
  "epochs_completed": 8,
  "best_val_pnl": 0.0,
  "best_epoch": 1,
  "best_win_rate": 0.0,
  "training_started": "2025-04-02T10:43:58.946682",
  "last_update": "2025-04-02T10:44:10.940892",
  "epochs": [
    {"epoch": 1, "train_loss": 1.0950355529785156, "val_loss": 1.1657923062642415, "train_acc": 0.3255208333333333, "val_acc": 0.0,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:01.840889", "data_age": 2, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}},
    {"epoch": 2, "train_loss": 1.0831659038861592, "val_loss": 1.1212460199991863, "train_acc": 0.390625, "val_acc": 0.0,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:03.134833", "data_age": 4, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}},
    {"epoch": 3, "train_loss": 1.0740693012873332, "val_loss": 1.0992945830027263, "train_acc": 0.4739583333333333, "val_acc": 0.0,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:04.425272", "data_age": 5, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}},
    {"epoch": 4, "train_loss": 1.0747728943824768, "val_loss": 1.0821794271469116, "train_acc": 0.4609375, "val_acc": 0.3229166666666667,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:05.716421", "data_age": 6, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}},
    {"epoch": 5, "train_loss": 1.0489931503931682, "val_loss": 1.0669521888097127, "train_acc": 0.5833333333333334, "val_acc": 1.0,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:07.007935", "data_age": 8, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}},
    {"epoch": 6, "train_loss": 1.0533669590950012, "val_loss": 1.0505590836207073, "train_acc": 0.5104166666666666, "val_acc": 1.0,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:08.296061", "data_age": 9, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}},
    {"epoch": 7, "train_loss": 1.0456886688868205, "val_loss": 1.0351698795954387, "train_acc": 0.5651041666666666, "val_acc": 1.0,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:09.607584", "data_age": 10, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}},
    {"epoch": 8, "train_loss": 1.040040671825409, "val_loss": 1.0227736632029216, "train_acc": 0.6119791666666666, "val_acc": 1.0,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}, "val": {"BUY": 1.0, "SELL": 0.0, "HOLD": 0.0}},
     "timestamp": "2025-04-02T10:44:10.940892", "data_age": 11, "cumulative_pnl": {"train": 0.0, "val": 0.0},
     "total_trades": {"train": 0, "val": 0}, "overall_win_rate": {"train": 0.0, "val": 0.0}}
  ],
  "cumulative_pnl": {"train": 0.0, "val": 0.0},
  "total_trades": {"train": 0, "val": 0},
  "total_wins": {"train": 0, "val": 0}
}
192
NN/models/saved/realtime_training_stats.json
Normal file
192
NN/models/saved/realtime_training_stats.json
Normal file
@ -0,0 +1,192 @@
{
  "epochs_completed": 7,
  "best_val_pnl": 0.002028853100759435,
  "best_epoch": 6,
  "best_win_rate": 0.5157894736842106,
  "training_started": "2025-03-31T02:50:10.418670",
  "last_update": "2025-03-31T02:50:15.227593",
  "epochs": [
    {"epoch": 1, "train_loss": 1.1206786036491394, "val_loss": 1.0542699098587036, "train_acc": 0.11197916666666667, "val_acc": 0.25,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 0.0, "SELL": 0.0, "HOLD": 1.0}, "val": {"BUY": 0.0, "SELL": 0.0, "HOLD": 1.0}},
     "timestamp": "2025-03-31T02:50:12.881423", "data_age": 2},
    {"epoch": 2, "train_loss": 1.1266120672225952, "val_loss": 1.072133183479309, "train_acc": 0.1171875, "val_acc": 0.25,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 0.0, "SELL": 0.0, "HOLD": 1.0}, "val": {"BUY": 0.0, "SELL": 0.0, "HOLD": 1.0}},
     "timestamp": "2025-03-31T02:50:13.186840", "data_age": 2},
    {"epoch": 3, "train_loss": 1.1415620843569438, "val_loss": 1.1701548099517822, "train_acc": 0.1015625, "val_acc": 0.5208333333333334,
     "train_pnl": 0.0, "val_pnl": 0.0, "train_win_rate": 0.0, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 0.0, "SELL": 0.0, "HOLD": 1.0}, "val": {"BUY": 0.0, "SELL": 0.0, "HOLD": 1.0}},
     "timestamp": "2025-03-31T02:50:13.442018", "data_age": 3},
    {"epoch": 4, "train_loss": 1.1331567962964375, "val_loss": 1.070081114768982, "train_acc": 0.09375, "val_acc": 0.22916666666666666,
     "train_pnl": 0.010650217327384765, "val_pnl": -0.0007049481907895126, "train_win_rate": 0.49279538904899134, "val_win_rate": 0.40625, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 0.0, "SELL": 0.9036458333333334, "HOLD": 0.09635416666666667}, "val": {"BUY": 0.0, "SELL": 0.3333333333333333, "HOLD": 0.6666666666666666}},
     "timestamp": "2025-03-31T02:50:13.739899", "data_age": 3},
    {"epoch": 5, "train_loss": 1.10965762535731, "val_loss": 1.0485950708389282, "train_acc": 0.12239583333333333, "val_acc": 0.17708333333333334,
     "train_pnl": 0.011924086862580204, "val_pnl": 0.0, "train_win_rate": 0.5070422535211268, "val_win_rate": 0.0, "best_position_size": 0.1,
     "signal_distribution": {"train": {"BUY": 0.0, "SELL": 0.7395833333333334, "HOLD": 0.2604166666666667}, "val": {"BUY": 0.0, "SELL": 0.0, "HOLD": 1.0}},
     "timestamp": "2025-03-31T02:50:14.073439", "data_age": 3},
    {"epoch": 6, "train_loss": 1.1272419293721516, "val_loss": 1.084235429763794, "train_acc": 0.1015625, "val_acc": 0.22916666666666666,
     "train_pnl": 0.014825159601390072, "val_pnl": 0.00405770620151887, "train_win_rate": 0.4908616187989556, "val_win_rate": 0.5157894736842106, "best_position_size": 2.0,
     "signal_distribution": {"train": {"BUY": 0.0, "SELL": 1.0, "HOLD": 0.0}, "val": {"BUY": 0.0, "SELL": 1.0, "HOLD": 0.0}},
     "timestamp": "2025-03-31T02:50:14.658295", "data_age": 4},
    {"epoch": 7, "train_loss": 1.1171108484268188, "val_loss": 1.0741244554519653, "train_acc": 0.1171875, "val_acc": 0.22916666666666666,
     "train_pnl": 0.0059474696523706605, "val_pnl": 0.00405770620151887, "train_win_rate": 0.4838709677419355, "val_win_rate": 0.5157894736842106, "best_position_size": 2.0,
     "signal_distribution": {"train": {"BUY": 0.0, "SELL": 0.7291666666666666, "HOLD": 0.2708333333333333}, "val": {"BUY": 0.0, "SELL": 1.0, "HOLD": 0.0}},
     "timestamp": "2025-03-31T02:50:15.227593", "data_age": 4}
  ]
}
@@ -112,27 +112,33 @@ class SimpleCNN(nn.Module):
     def _build_network(self):
         """Build the neural network with current feature dimensions"""
         # Create a flexible architecture that adapts to input dimensions
+        # Increased complexity
         self.fc_layers = nn.Sequential(
-            nn.Linear(self.feature_dim, 256),
+            nn.Linear(self.feature_dim, 512),  # Increased size
             nn.ReLU(),
-            nn.Linear(256, 256),
-            nn.ReLU()
+            nn.Dropout(0.2),  # Added dropout
+            nn.Linear(512, 512),  # Increased size
+            nn.ReLU(),
+            nn.Dropout(0.2),  # Added dropout
+            nn.Linear(512, 512),  # Added layer
+            nn.ReLU(),
+            nn.Dropout(0.2)  # Added dropout
         )

         # Output heads (Dueling DQN architecture)
-        self.advantage_head = nn.Linear(256, self.n_actions)
-        self.value_head = nn.Linear(256, 1)
+        self.advantage_head = nn.Linear(512, self.n_actions)  # Updated input size
+        self.value_head = nn.Linear(512, 1)  # Updated input size

         # Extrema detection head
-        self.extrema_head = nn.Linear(256, 3)  # 0=bottom, 1=top, 2=neither
+        self.extrema_head = nn.Linear(512, 3)  # 0=bottom, 1=top, 2=neither, Updated input size

         # Price prediction heads for different timeframes
-        self.price_pred_immediate = nn.Linear(256, 3)  # Up, Down, Sideways for immediate term (1s, 1m)
-        self.price_pred_midterm = nn.Linear(256, 3)  # Up, Down, Sideways for mid-term (1h)
-        self.price_pred_longterm = nn.Linear(256, 3)  # Up, Down, Sideways for long-term (1d)
+        self.price_pred_immediate = nn.Linear(512, 3)  # Updated input size
+        self.price_pred_midterm = nn.Linear(512, 3)  # Updated input size
+        self.price_pred_longterm = nn.Linear(512, 3)  # Updated input size

         # Regression heads for exact price prediction
-        self.price_pred_value = nn.Linear(256, 4)  # Predicts % change for each timeframe (1s, 1m, 1h, 1d)
+        self.price_pred_value = nn.Linear(512, 4)  # Updated input size

     def _check_rebuild_network(self, features):
         """Check if network needs to be rebuilt for different feature dimensions"""
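Aside: the widened trunk introduced above follows a simple Linear + ReLU + Dropout pattern; as a sketch only (the 512 width and 0.2 dropout rate come from the diff, the helper name is illustrative):

import torch.nn as nn

def make_trunk(feature_dim: int, width: int = 512, p_drop: float = 0.2) -> nn.Sequential:
    """Three Linear+ReLU blocks, each followed by dropout, mirroring the new fc_layers."""
    return nn.Sequential(
        nn.Linear(feature_dim, width), nn.ReLU(), nn.Dropout(p_drop),
        nn.Linear(width, width), nn.ReLU(), nn.Dropout(p_drop),
        nn.Linear(width, width), nn.ReLU(), nn.Dropout(p_drop),
    )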
@@ -146,58 +152,70 @@ class SimpleCNN(nn.Module):
         return False

     def forward(self, x):
-        """
-        Forward pass through the network
-        Returns action values, extrema predictions, and price movement predictions for multiple timeframes
-        """
-        # Handle different input shapes
-        if len(x.shape) == 2:  # [batch_size, features]
-            # Simple feature vector
-            batch_size, features = x.shape
-            # Check if we need to rebuild the network for new dimensions
-            self._check_rebuild_network(features)
-
-        elif len(x.shape) == 3:  # [batch_size, timeframes/channels, features]
-            # Reshape to flatten timeframes/channels with features
-            batch_size, timeframes, features = x.shape
-            total_features = timeframes * features
-
-            # Check if we need to rebuild the network for new dimensions
-            self._check_rebuild_network(total_features)
-
-            # Reshape tensor to [batch_size, total_features]
-            x = x.reshape(batch_size, total_features)
-
-        # Apply fully connected layers
-        fc_out = self.fc_layers(x)
-
-        # Dueling architecture
-        advantage = self.advantage_head(fc_out)
-        value = self.value_head(fc_out)
-
-        # Q-values = value + (advantage - mean(advantage))
-        action_values = value + advantage - advantage.mean(dim=1, keepdim=True)
-
-        # Extrema predictions
-        extrema_pred = self.extrema_head(fc_out)
-
-        # Price movement predictions for different timeframes
-        price_immediate = self.price_pred_immediate(fc_out)  # 1s, 1m
-        price_midterm = self.price_pred_midterm(fc_out)  # 1h
-        price_longterm = self.price_pred_longterm(fc_out)  # 1d
-
-        # Regression values for exact price predictions (percentage changes)
-        price_values = self.price_pred_value(fc_out)
-
-        # Return all predictions in a structured dictionary
-        price_predictions = {
-            'immediate': price_immediate,
-            'midterm': price_midterm,
-            'longterm': price_longterm,
-            'values': price_values
-        }
-
-        return action_values, extrema_pred, price_predictions
+        """Forward pass through the network"""
+        # Flatten input if needed to ensure it matches the expected feature dimension
+        batch_size = x.size(0)
+
+        # Reshape input if needed
+        if len(x.shape) > 2:  # Handle multi-dimensional input
+            # For 3D input: [batch, seq_len, features] or [batch, channels, features]
+            x = x.reshape(batch_size, -1)  # Flatten to [batch, seq_len*features]
+
+        # Check if the feature dimension matches and rebuild if necessary
+        if x.size(1) != self.feature_dim:
+            self._check_rebuild_network(x.size(1))
+
+        # Apply fully connected layers with ReLU activation
+        x = self.fc_layers(x)
+
+        # Branch 1: Action values (Q-values)
+        action_values = self.advantage_head(x)
+
+        # Branch 2: Extrema detection (market top/bottom classification)
+        extrema_pred = self.extrema_head(x)
+
+        # Branch 3: Price movement prediction over different timeframes
+        # Split into three timeframes: immediate, midterm, longterm
+        price_immediate = self.price_pred_immediate(x)
+        price_midterm = self.price_pred_midterm(x)
+        price_longterm = self.price_pred_longterm(x)
+
+        # Branch 4: Value prediction (regression for expected price changes)
+        price_values = self.price_pred_value(x)
+
+        # Package price predictions
+        price_predictions = {
+            'immediate': price_immediate,  # Classification (up/down/sideways)
+            'midterm': price_midterm,  # Classification (up/down/sideways)
+            'longterm': price_longterm,  # Classification (up/down/sideways)
+            'values': price_values  # Regression (expected % change)
+        }
+
+        # Return all outputs and the hidden feature representation
+        return action_values, extrema_pred, price_predictions, x
+
+    def extract_features(self, x):
+        """Extract hidden features from the input and return both action values and features"""
+        # Flatten input if needed to ensure it matches the expected feature dimension
+        batch_size = x.size(0)
+
+        # Reshape input if needed
+        if len(x.shape) > 2:  # Handle multi-dimensional input
+            # For 3D input: [batch, seq_len, features] or [batch, channels, features]
+            x = x.reshape(batch_size, -1)  # Flatten to [batch, seq_len*features]
+
+        # Check if the feature dimension matches and rebuild if necessary
+        if x.size(1) != self.feature_dim:
+            self._check_rebuild_network(x.size(1))
+
+        # Apply fully connected layers with ReLU activation
+        x_features = self.fc_layers(x)
+
+        # Branch 1: Action values (Q-values)
+        action_values = self.advantage_head(x_features)
+
+        # Return action values and the hidden feature representation
+        return action_values, x_features

     def save(self, path):
         """Save model weights and architecture"""
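Aside: the old forward pass combined the two dueling heads into Q-values, while the new version returns the advantage head's output directly. A minimal sketch of the dropped combination, taken from the formula in the removed lines (the function name is illustrative):

import torch

def dueling_q_values(value: torch.Tensor, advantage: torch.Tensor) -> torch.Tensor:
    """Q(s, a) = V(s) + A(s, a) - mean over actions of A(s, a)."""
    return value + advantage - advantage.mean(dim=1, keepdim=True)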
@@ -241,8 +259,10 @@ class CNNModelPyTorch(nn.Module):
         self.output_size = output_size
         self.timeframes = timeframes

-        # Calculate total input features across all timeframes
-        self.total_features = num_features * len(timeframes)
+        # num_features should already be the total features across all timeframes
+        self.total_features = num_features
+        logger.info(f"CNNModelPyTorch initialized with window_size={window_size}, num_features={num_features}, "
+                    f"total_features={self.total_features}, output_size={output_size}, timeframes={timeframes}")

         # Device configuration
         self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -317,6 +337,10 @@ class CNNModelPyTorch(nn.Module):
         # Ensure input is on the correct device
         x = x.to(self.device)

+        # Log input tensor shape for debugging
+        input_shape = x.size()
+        logger.debug(f"Input tensor shape: {input_shape}")
+
         # Check input dimensions and reshape as needed
         if len(x.size()) == 2:
             # If input is [batch_size, features], reshape to [batch_size, features, 1]
@@ -324,8 +348,17 @@ class CNNModelPyTorch(nn.Module):

             # Check and handle if input features don't match model expectations
             if feature_dim != self.total_features:
-                logger.warning(f"Input features ({feature_dim}) don't match model features ({self.total_features}), rebuilding layers")
-                self.rebuild_conv_layers(feature_dim)
+                logger.warning(f"Input features ({feature_dim}) don't match model features ({self.total_features})")
+                if not hasattr(self, 'rebuild_warning_shown'):
+                    logger.error(f"Dimension mismatch: Expected {self.total_features} features but got {feature_dim}")
+                    self.rebuild_warning_shown = True
+                # Don't rebuild - instead adapt the input
+                # If features are fewer, pad with zeros. If more, truncate
+                if feature_dim < self.total_features:
+                    padding = torch.zeros(batch_size, self.total_features - feature_dim, device=self.device)
+                    x = torch.cat([x, padding], dim=1)
+                else:
+                    x = x[:, :self.total_features]

             # For 1D input, use a sequence length of 1
             seq_len = 1
@@ -336,14 +369,26 @@ class CNNModelPyTorch(nn.Module):

             # Check and handle if input dimensions don't match model expectations
             if feature_dim != self.total_features:
-                logger.warning(f"Input features ({feature_dim}) don't match model features ({self.total_features}), rebuilding layers")
-                self.rebuild_conv_layers(feature_dim)
+                logger.warning(f"Input features ({feature_dim}) don't match model features ({self.total_features})")
+                if not hasattr(self, 'rebuild_warning_shown'):
+                    logger.error(f"Dimension mismatch: Expected {self.total_features} features but got {feature_dim}")
+                    self.rebuild_warning_shown = True
+                # Don't rebuild - instead adapt the input
+                # If features are fewer, pad with zeros. If more, truncate
+                if feature_dim < self.total_features:
+                    padding = torch.zeros(batch_size, seq_len, self.total_features - feature_dim, device=self.device)
+                    x = torch.cat([x, padding], dim=2)
+                else:
+                    x = x[:, :, :self.total_features]

             # Reshape input: [batch, window_size, features] -> [batch, features, window_size]
             x = x.permute(0, 2, 1)
         else:
             raise ValueError(f"Unexpected input shape: {x.size()}, expected 2D or 3D tensor")

+        # Log reshaped tensor for debugging
+        logger.debug(f"Reshaped tensor for convolution: {x.size()}")
+
         # Convolutional layers with dropout - safely handle small spatial dimensions
         try:
             x = self.dropout1(F.relu(self.norm1(self.conv1(x))))
@@ -375,7 +375,7 @@ def realtime(data_interface, model, args, chart=None, symbol=None):
     logger.info(f"Starting real-time inference mode for {symbol}...")

     try:
         from NN.utils.realtime_analyzer import RealtimeAnalyzer

         # Load the latest model
         model_dir = os.path.join('models')
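Aside: the pad-or-truncate adaptation used in both branches above can be expressed as a small standalone helper; a sketch only, with an illustrative name, assuming a PyTorch tensor whose last dimension holds the features:

import torch

def adapt_feature_dim(x: torch.Tensor, expected: int) -> torch.Tensor:
    """Zero-pad or truncate the last dimension of x so it has size `expected`."""
    current = x.size(-1)
    if current < expected:
        pad_shape = list(x.shape[:-1]) + [expected - current]
        return torch.cat([x, x.new_zeros(pad_shape)], dim=-1)
    return x[..., :expected]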
585
NN/train_enhanced.py
Normal file
585
NN/train_enhanced.py
Normal file
@ -0,0 +1,585 @@
import os
import sys
import time
import logging
import argparse
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import contextlib
from sklearn.model_selection import train_test_split

# Add parent directory to path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

# Import our enhanced agent
from NN.models.dqn_agent_enhanced import EnhancedDQNAgent
from NN.utils.data_interface import DataInterface

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('logs/enhanced_training.log')
    ]
)
logger = logging.getLogger(__name__)

def parse_args():
    """Parse command line arguments"""
    parser = argparse.ArgumentParser(description='Train enhanced RL trading agent')
    parser.add_argument('--episodes', type=int, default=100, help='Number of episodes to train')
    parser.add_argument('--max-steps', type=int, default=2000, help='Maximum steps per episode')
    parser.add_argument('--symbol', type=str, default='ETH/USDT', help='Trading symbol')
    parser.add_argument('--no-gpu', action='store_true', help='Disable GPU usage')
    parser.add_argument('--confidence', type=float, default=0.4, help='Confidence threshold')
    parser.add_argument('--load-model', type=str, default='', help='Load existing model')
    parser.add_argument('--batch-size', type=int, default=128, help='Training batch size')
    parser.add_argument('--learning-rate', type=float, default=0.0003, help='Learning rate')
    parser.add_argument('--no-pretrain', action='store_true', help='Skip pre-training')
    parser.add_argument('--pretrain-epochs', type=int, default=20, help='Number of pre-training epochs')
    return parser.parse_args()

def generate_price_prediction_training_data(data_1m, data_1h, data_1d, window_size=20):
|
||||||
|
"""
|
||||||
|
Generate labeled training data for price prediction pre-training
|
||||||
|
|
||||||
|
Args:
|
||||||
|
data_1m: 1-minute candle data
|
||||||
|
data_1h: 1-hour candle data
|
||||||
|
data_1d: 1-day candle data
|
||||||
|
window_size: Size of the observation window
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
X, y_immediate, y_midterm, y_longterm, y_values
|
||||||
|
"""
|
||||||
|
logger.info("Generating price prediction training data")
|
||||||
|
|
||||||
|
# Features to use
|
||||||
|
ohlcv_columns = ['open', 'high', 'low', 'close', 'volume']
|
||||||
|
|
||||||
|
# Create feature sets
|
||||||
|
X = []
|
||||||
|
y_immediate = [] # 1m prediction (next 5min)
|
||||||
|
y_midterm = [] # 1h prediction (next few hours)
|
||||||
|
y_longterm = [] # 1d prediction (next day)
|
||||||
|
y_values = [] # % change for each timeframe
|
||||||
|
|
||||||
|
# Need enough data for all timeframes
|
||||||
|
if len(data_1m) < window_size + 5 or len(data_1h) < 2 or len(data_1d) < 2:
|
||||||
|
logger.error("Not enough data for all timeframes")
|
||||||
|
return np.array([]), np.array([]), np.array([]), np.array([]), np.array([])
|
||||||
|
|
||||||
|
# Generate examples
|
||||||
|
for i in range(window_size, len(data_1m) - 5):
|
||||||
|
# Skip if we can't align with higher timeframes
|
||||||
|
if i % 60 != 0: # Only use minutes that align with hour boundaries
|
||||||
|
continue
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Get window of 1m data as input
|
||||||
|
window_1m = data_1m[i-window_size:i][ohlcv_columns].values
|
||||||
|
|
||||||
|
# Find corresponding indices in higher timeframes
|
||||||
|
curr_timestamp = data_1m.index[i]
|
||||||
|
h_idx = data_1h.index.get_indexer([curr_timestamp], method='nearest')[0]
|
||||||
|
d_idx = data_1d.index.get_indexer([curr_timestamp], method='nearest')[0]
|
||||||
|
|
||||||
|
# Skip if indices are out of bounds
|
||||||
|
if h_idx < 0 or h_idx >= len(data_1h) - 1 or d_idx < 0 or d_idx >= len(data_1d) - 1:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Get future prices for label generation
|
||||||
|
future_5m = data_1m[i+5]['close']
|
||||||
|
future_1h = data_1h[h_idx+1]['close']
|
||||||
|
future_1d = data_1d[d_idx+1]['close']
|
||||||
|
|
||||||
|
current_price = data_1m[i]['close']
|
||||||
|
|
||||||
|
# Calculate % change for each timeframe
|
||||||
|
change_5m = (future_5m - current_price) / current_price * 100
|
||||||
|
change_1h = (future_1h - current_price) / current_price * 100
|
||||||
|
change_1d = (future_1d - current_price) / current_price * 100
|
||||||
|
|
||||||
|
# Determine price direction (0=down, 1=sideways, 2=up)
|
||||||
|
def get_direction(change):
|
||||||
|
if change < -0.5: # Down if less than -0.5%
|
||||||
|
return 0
|
||||||
|
elif change > 0.5: # Up if more than 0.5%
|
||||||
|
return 2
|
||||||
|
else: # Sideways if between -0.5% and 0.5%
|
||||||
|
return 1
|
||||||
|
|
||||||
|
direction_5m = get_direction(change_5m)
|
||||||
|
direction_1h = get_direction(change_1h)
|
||||||
|
direction_1d = get_direction(change_1d)
|
||||||
|
|
||||||
|
# Add to dataset
|
||||||
|
X.append(window_1m.flatten())
|
||||||
|
y_immediate.append(direction_5m)
|
||||||
|
y_midterm.append(direction_1h)
|
||||||
|
y_longterm.append(direction_1d)
|
||||||
|
y_values.append([change_5m, change_1h, change_1d, 0]) # Last value reserved
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.warning(f"Error generating training example at index {i}: {str(e)}")
|
||||||
|
|
||||||
|
# Convert to numpy arrays
|
||||||
|
X = np.array(X)
|
||||||
|
y_immediate = np.array(y_immediate)
|
||||||
|
y_midterm = np.array(y_midterm)
|
||||||
|
y_longterm = np.array(y_longterm)
|
||||||
|
y_values = np.array(y_values)
|
||||||
|
|
||||||
|
logger.info(f"Generated {len(X)} training examples")
|
||||||
|
logger.info(f"Class distribution - Immediate: {np.bincount(y_immediate)}, "
|
||||||
|
f"Midterm: {np.bincount(y_midterm)}, Long-term: {np.bincount(y_longterm)}")
|
||||||
|
|
||||||
|
return X, y_immediate, y_midterm, y_longterm, y_values
|
||||||
|
|
||||||
|
def pretrain_price_prediction(agent, data_interface, n_epochs=20, batch_size=128, device=None):
|
||||||
|
"""
|
||||||
|
Pre-train the price prediction capabilities of the agent
|
||||||
|
|
||||||
|
Args:
|
||||||
|
agent: EnhancedDQNAgent instance
|
||||||
|
data_interface: DataInterface instance
|
||||||
|
n_epochs: Number of pre-training epochs
|
||||||
|
batch_size: Batch size for pre-training
|
||||||
|
device: Device to use for pre-training
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
The pre-trained agent
|
||||||
|
"""
|
||||||
|
logger.info("Starting price prediction pre-training")
|
||||||
|
|
||||||
|
try:
|
||||||
|
# Ensure we have the necessary timeframes
|
||||||
|
timeframes_needed = ['1m', '1h', '1d']
|
||||||
|
for tf in timeframes_needed:
|
||||||
|
if tf not in data_interface.timeframes:
|
||||||
|
logger.info(f"Adding timeframe {tf} for pre-training")
|
||||||
|
# Add timeframe to the list if not present
|
||||||
|
if tf not in data_interface.timeframes:
|
||||||
|
data_interface.timeframes.append(tf)
|
||||||
|
data_interface.dataframes[tf] = None
|
||||||
|
|
||||||
|
# Get data for each timeframe
|
||||||
|
data_1m = data_interface.get_historical_data(timeframe='1m')
|
||||||
|
data_1h = data_interface.get_historical_data(timeframe='1h')
|
||||||
|
data_1d = data_interface.get_historical_data(timeframe='1d')
|
||||||
|
|
||||||
|
# Generate labeled training data
|
||||||
|
X, y_immediate, y_midterm, y_longterm, y_values = generate_price_prediction_training_data(
|
||||||
|
data_1m, data_1h, data_1d, window_size=20
|
||||||
|
)
|
||||||
|
|
||||||
|
if len(X) == 0:
|
||||||
|
logger.error("No training examples generated. Skipping pre-training.")
|
||||||
|
return agent
|
||||||
|
|
||||||
|
# Split data into training and validation sets
|
||||||
|
X_train, X_val, y_imm_train, y_imm_val, y_mid_train, y_mid_val, y_long_train, y_long_val, y_val_train, y_val_val = train_test_split(
|
||||||
|
X, y_immediate, y_midterm, y_longterm, y_values, test_size=0.2, random_state=42
|
||||||
|
)
|
||||||
|
|
||||||
|
# Convert to torch tensors
|
||||||
|
X_train_tensor = torch.FloatTensor(X_train).to(device)
|
||||||
|
y_imm_train_tensor = torch.LongTensor(y_imm_train).to(device)
|
||||||
|
y_mid_train_tensor = torch.LongTensor(y_mid_train).to(device)
|
||||||
|
y_long_train_tensor = torch.LongTensor(y_long_train).to(device)
|
||||||
|
y_val_train_tensor = torch.FloatTensor(y_val_train).to(device)
|
||||||
|
|
||||||
|
X_val_tensor = torch.FloatTensor(X_val).to(device)
|
||||||
|
y_imm_val_tensor = torch.LongTensor(y_imm_val).to(device)
|
||||||
|
y_mid_val_tensor = torch.LongTensor(y_mid_val).to(device)
|
||||||
|
y_long_val_tensor = torch.LongTensor(y_long_val).to(device)
|
||||||
|
y_val_val_tensor = torch.FloatTensor(y_val_val).to(device)
|
||||||
|
|
||||||
|
# Calculate class weights for imbalanced data
|
||||||
|
def get_class_weights(labels):
|
||||||
|
counts = np.bincount(labels)
|
||||||
|
if len(counts) < 3: # Ensure we have 3 classes
|
||||||
|
counts = np.append(counts, [0] * (3 - len(counts)))
|
||||||
|
weights = 1.0 / np.array(counts)
|
||||||
|
weights = weights / np.sum(weights) # Normalize
|
||||||
|
return weights
|
||||||
|
|
||||||
|
imm_weights = torch.FloatTensor(get_class_weights(y_imm_train)).to(device)
|
||||||
|
mid_weights = torch.FloatTensor(get_class_weights(y_mid_train)).to(device)
|
||||||
|
long_weights = torch.FloatTensor(get_class_weights(y_long_train)).to(device)
|
||||||
|
|
||||||
|
# Create DataLoader for batch training
|
||||||
|
train_dataset = TensorDataset(
|
||||||
|
X_train_tensor, y_imm_train_tensor, y_mid_train_tensor,
|
||||||
|
y_long_train_tensor, y_val_train_tensor
|
||||||
|
)
|
||||||
|
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
|
||||||
|
|
||||||
|
# Set up loss functions with class weights
|
||||||
|
imm_criterion = nn.CrossEntropyLoss(weight=imm_weights)
|
||||||
|
mid_criterion = nn.CrossEntropyLoss(weight=mid_weights)
|
||||||
|
long_criterion = nn.CrossEntropyLoss(weight=long_weights)
|
||||||
|
value_criterion = nn.MSELoss()
|
||||||
|
|
||||||
|
# Set up optimizer (separate from agent's optimizer)
|
||||||
|
pretrain_optimizer = torch.optim.Adam(agent.policy_net.parameters(), lr=0.0002)
|
||||||
|
pretrain_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
|
||||||
|
pretrain_optimizer, mode='min', factor=0.5, patience=3, verbose=True
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set model to training mode
|
||||||
|
agent.policy_net.train()
|
||||||
|
|
||||||
|
# Training loop
|
||||||
|
best_val_loss = float('inf')
|
||||||
|
patience = 5
|
||||||
|
patience_counter = 0
|
||||||
|
|
||||||
|
# Create TensorBoard writer for pre-training
|
||||||
|
writer = SummaryWriter(log_dir=f'runs/pretrain_{int(time.time())}')
|
||||||
|
|
||||||
|
for epoch in range(n_epochs):
|
||||||
|
# Training phase
|
||||||
|
train_loss = 0.0
|
||||||
|
imm_correct, mid_correct, long_correct = 0, 0, 0
|
||||||
|
total = 0
|
||||||
|
|
||||||
|
for X_batch, y_imm_batch, y_mid_batch, y_long_batch, y_val_batch in train_loader:
|
||||||
|
# Zero gradients
|
||||||
|
pretrain_optimizer.zero_grad()
|
||||||
|
|
||||||
|
# Forward pass
|
||||||
|
with torch.cuda.amp.autocast() if agent.use_mixed_precision else contextlib.nullcontext():
|
||||||
|
q_values, _, price_preds, _ = agent.policy_net(X_batch)
|
||||||
|
|
||||||
|
# Calculate losses for each prediction head
|
||||||
|
imm_loss = imm_criterion(price_preds['immediate'], y_imm_batch)
|
||||||
|
mid_loss = mid_criterion(price_preds['midterm'], y_mid_batch)
|
||||||
|
long_loss = long_criterion(price_preds['longterm'], y_long_batch)
|
||||||
|
value_loss = value_criterion(price_preds['values'], y_val_batch)
|
||||||
|
|
||||||
|
# Combined loss (weighted by importance)
|
||||||
|
total_loss = imm_loss + 0.7 * mid_loss + 0.5 * long_loss + 0.3 * value_loss
|
||||||
|
|
||||||
|
# Backward pass and optimize
|
||||||
|
if agent.use_mixed_precision:
|
||||||
|
agent.scaler.scale(total_loss).backward()
|
||||||
|
agent.scaler.unscale_(pretrain_optimizer)
|
||||||
|
torch.nn.utils.clip_grad_norm_(agent.policy_net.parameters(), 1.0)
|
||||||
|
agent.scaler.step(pretrain_optimizer)
|
||||||
|
agent.scaler.update()
|
||||||
|
else:
|
||||||
|
total_loss.backward()
|
||||||
|
torch.nn.utils.clip_grad_norm_(agent.policy_net.parameters(), 1.0)
|
||||||
|
pretrain_optimizer.step()
|
||||||
|
|
||||||
|
# Accumulate metrics
|
||||||
|
train_loss += total_loss.item()
|
||||||
|
total += X_batch.size(0)
|
||||||
|
|
||||||
|
# Calculate accuracy
|
||||||
|
_, imm_pred = torch.max(price_preds['immediate'], 1)
|
||||||
|
_, mid_pred = torch.max(price_preds['midterm'], 1)
|
||||||
|
_, long_pred = torch.max(price_preds['longterm'], 1)
|
||||||
|
|
||||||
|
imm_correct += (imm_pred == y_imm_batch).sum().item()
|
||||||
|
mid_correct += (mid_pred == y_mid_batch).sum().item()
|
||||||
|
long_correct += (long_pred == y_long_batch).sum().item()
|
||||||
|
|
||||||
|
# Calculate epoch metrics
|
||||||
|
train_loss /= len(train_loader)
|
||||||
|
imm_acc = imm_correct / total
|
||||||
|
mid_acc = mid_correct / total
|
||||||
|
long_acc = long_correct / total
|
||||||
|
|
||||||
|
# Validation phase
|
||||||
|
agent.policy_net.eval()
|
||||||
|
val_loss = 0.0
|
||||||
|
imm_val_correct, mid_val_correct, long_val_correct = 0, 0, 0
|
||||||
|
|
||||||
|
with torch.no_grad():
|
||||||
|
# Forward pass on validation data
|
||||||
|
q_values, _, val_price_preds, _ = agent.policy_net(X_val_tensor)
|
||||||
|
|
||||||
|
# Calculate validation losses
|
||||||
|
val_imm_loss = imm_criterion(val_price_preds['immediate'], y_imm_val_tensor)
|
||||||
|
val_mid_loss = mid_criterion(val_price_preds['midterm'], y_mid_val_tensor)
|
||||||
|
val_long_loss = long_criterion(val_price_preds['longterm'], y_long_val_tensor)
|
||||||
|
val_value_loss = value_criterion(val_price_preds['values'], y_val_val_tensor)
|
||||||
|
|
||||||
|
val_total_loss = val_imm_loss + 0.7 * val_mid_loss + 0.5 * val_long_loss + 0.3 * val_value_loss
|
||||||
|
val_loss = val_total_loss.item()
|
||||||
|
|
||||||
|
# Calculate validation accuracy
|
||||||
|
_, imm_val_pred = torch.max(val_price_preds['immediate'], 1)
|
||||||
|
_, mid_val_pred = torch.max(val_price_preds['midterm'], 1)
|
||||||
|
_, long_val_pred = torch.max(val_price_preds['longterm'], 1)
|
||||||
|
|
||||||
|
imm_val_correct = (imm_val_pred == y_imm_val_tensor).sum().item()
|
||||||
|
mid_val_correct = (mid_val_pred == y_mid_val_tensor).sum().item()
|
||||||
|
long_val_correct = (long_val_pred == y_long_val_tensor).sum().item()
|
||||||
|
|
||||||
|
imm_val_acc = imm_val_correct / len(X_val_tensor)
|
||||||
|
mid_val_acc = mid_val_correct / len(X_val_tensor)
|
||||||
|
long_val_acc = long_val_correct / len(X_val_tensor)
|
||||||
|
|
||||||
|
# Log to TensorBoard
|
||||||
|
writer.add_scalar('pretrain/train_loss', train_loss, epoch)
|
||||||
|
writer.add_scalar('pretrain/val_loss', val_loss, epoch)
|
||||||
|
writer.add_scalar('pretrain/imm_acc', imm_acc, epoch)
|
||||||
|
writer.add_scalar('pretrain/mid_acc', mid_acc, epoch)
|
||||||
|
writer.add_scalar('pretrain/long_acc', long_acc, epoch)
|
||||||
|
writer.add_scalar('pretrain/imm_val_acc', imm_val_acc, epoch)
|
||||||
|
writer.add_scalar('pretrain/mid_val_acc', mid_val_acc, epoch)
|
||||||
|
writer.add_scalar('pretrain/long_val_acc', long_val_acc, epoch)
|
||||||
|
|
||||||
|
# Learning rate scheduling
|
||||||
|
pretrain_scheduler.step(val_loss)
|
||||||
|
|
||||||
|
# Early stopping check
|
||||||
|
if val_loss < best_val_loss:
|
||||||
|
best_val_loss = val_loss
|
||||||
|
patience_counter = 0
|
||||||
|
# Copy policy_net weights to target_net
|
||||||
|
agent.target_net.load_state_dict(agent.policy_net.state_dict())
|
||||||
|
logger.info(f"Saved best model with validation loss: {val_loss:.4f}")
|
||||||
|
# Save pre-trained model
|
||||||
|
agent.save("NN/models/saved/enhanced_dqn_pretrained")
|
||||||
|
else:
|
||||||
|
patience_counter += 1
|
||||||
|
if patience_counter >= patience:
|
||||||
|
logger.info(f"Early stopping triggered after {epoch+1} epochs")
|
||||||
|
break
|
||||||
|
|
||||||
|
# Log progress
|
||||||
|
logger.info(f"Epoch {epoch+1}/{n_epochs}: "
|
||||||
|
f"Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, "
|
||||||
|
f"Imm Acc: {imm_acc:.4f}/{imm_val_acc:.4f}, "
|
||||||
|
f"Mid Acc: {mid_acc:.4f}/{mid_val_acc:.4f}, "
|
||||||
|
f"Long Acc: {long_acc:.4f}/{long_val_acc:.4f}")
|
||||||
|
|
||||||
|
# Set model back to training mode for next epoch
|
||||||
|
agent.policy_net.train()
|
||||||
|
|
||||||
|
writer.close()
|
||||||
|
logger.info("Price prediction pre-training complete")
|
||||||
|
return agent
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error during price prediction pre-training: {str(e)}")
|
||||||
|
import traceback
|
||||||
|
logger.error(traceback.format_exc())
|
||||||
|
return agent
|
||||||
|
|
||||||
|
def train_enhanced_rl(args):
|
||||||
|
"""
|
||||||
|
Train the enhanced RL agent for trading
|
||||||
|
|
||||||
|
Args:
|
||||||
|
args: Command line arguments
|
||||||
|
"""
|
||||||
|
# Setup device
|
||||||
|
if args.no_gpu:
|
||||||
|
device = torch.device('cpu')
|
||||||
|
else:
|
||||||
|
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
||||||
|
|
||||||
|
logger.info(f"Using device: {device}")
|
||||||
|
|
||||||
|
# Set up data interface
|
||||||
|
data_interface = DataInterface(symbol=args.symbol, timeframes=['1m', '5m', '15m'])
|
||||||
|
|
||||||
|
# Fetch historical data for each timeframe
|
||||||
|
for timeframe in data_interface.timeframes:
|
||||||
|
df = data_interface.get_historical_data(timeframe=timeframe)
|
||||||
|
logger.info(f"Using data for {args.symbol} {timeframe} ({len(data_interface.dataframes[timeframe])} candles)")
|
||||||
|
|
||||||
|
# Create environment for training
|
||||||
|
from NN.environments.trading_env import TradingEnvironment
|
||||||
|
window_size = 20
|
||||||
|
train_env = TradingEnvironment(
|
||||||
|
data_interface=data_interface,
|
||||||
|
initial_balance=10000.0,
|
||||||
|
transaction_fee=0.0002,
|
||||||
|
window_size=window_size,
|
||||||
|
max_position=1.0,
|
||||||
|
reward_scaling=100.0
|
||||||
|
)
|
||||||
|
|
||||||
|
# Create agent with improved parameters
|
||||||
|
state_shape = train_env.observation_space.shape
|
||||||
|
n_actions = train_env.action_space.n
|
||||||
|
|
||||||
|
agent = EnhancedDQNAgent(
|
||||||
|
state_shape=state_shape,
|
||||||
|
n_actions=n_actions,
|
||||||
|
learning_rate=args.learning_rate,
|
||||||
|
gamma=0.95,
|
||||||
|
epsilon=1.0,
|
||||||
|
epsilon_min=0.05,
|
||||||
|
epsilon_decay=0.995,
|
||||||
|
buffer_size=50000,
|
||||||
|
batch_size=args.batch_size,
|
||||||
|
target_update=10,
|
||||||
|
confidence_threshold=args.confidence,
|
||||||
|
device=device
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load existing model if specified
|
||||||
|
if args.load_model:
|
||||||
|
model_path = args.load_model
|
||||||
|
if agent.load(model_path):
|
||||||
|
logger.info(f"Loaded existing model from {model_path}")
|
||||||
|
else:
|
||||||
|
logger.error(f"Error loading model from {model_path}")
|
||||||
|
|
||||||
|
# Pre-training for price prediction
|
||||||
|
if not args.no_pretrain and not args.load_model:
|
||||||
|
logger.info("Starting pre-training phase")
|
||||||
|
agent = pretrain_price_prediction(
|
||||||
|
agent=agent,
|
||||||
|
data_interface=data_interface,
|
||||||
|
n_epochs=args.pretrain_epochs,
|
||||||
|
batch_size=args.batch_size,
|
||||||
|
device=device
|
||||||
|
)
|
||||||
|
logger.info("Pre-training completed")
|
||||||
|
|
||||||
|
# Setup TensorBoard
|
||||||
|
writer = SummaryWriter(log_dir=f'runs/enhanced_rl_{int(time.time())}')
|
||||||
|
|
||||||
|
# Log hardware info
|
||||||
|
writer.add_text("hardware/device", str(device), 0)
|
||||||
|
if torch.cuda.is_available():
|
||||||
|
for i in range(torch.cuda.device_count()):
|
||||||
|
writer.add_text(f"hardware/gpu_{i}", torch.cuda.get_device_name(i), 0)
|
||||||
|
|
||||||
|
# Move agent to device
|
||||||
|
agent.move_models_to_device(device)
|
||||||
|
|
||||||
|
# Training loop
|
||||||
|
logger.info(f"Starting enhanced training for {args.episodes} episodes")
|
||||||
|
|
||||||
|
total_rewards = []
|
||||||
|
episode_losses = []
|
||||||
|
trade_win_rates = []
|
||||||
|
best_reward = -np.inf
|
||||||
|
|
||||||
|
try:
|
||||||
|
for episode in range(args.episodes):
|
||||||
|
# Reset environment for new episode
|
||||||
|
state = train_env.reset()
|
||||||
|
total_reward = 0.0
|
||||||
|
done = False
|
||||||
|
step = 0
|
||||||
|
episode_start_time = time.time()
|
||||||
|
|
||||||
|
# Track trade statistics
|
||||||
|
trades = []
|
||||||
|
wins = 0
|
||||||
|
losses = 0
|
||||||
|
|
||||||
|
# Run episode
|
||||||
|
while not done and step < args.max_steps:
|
||||||
|
# Choose action
|
||||||
|
action, confidence = agent.act(state)
|
||||||
|
|
||||||
|
# Take action in environment
|
||||||
|
next_state, reward, done, info = train_env.step(action)
|
||||||
|
|
||||||
|
# Remember experience
|
||||||
|
agent.remember(state, action, reward, next_state, done)
|
||||||
|
|
||||||
|
# Track trade results
|
||||||
|
if 'trade_result' in info and info['trade_result'] is not None:
|
||||||
|
trade_result = info['trade_result']
|
||||||
|
trade_pnl = trade_result['pnl']
|
||||||
|
trades.append(trade_pnl)
|
||||||
|
|
||||||
|
if trade_pnl > 0:
|
||||||
|
wins += 1
|
||||||
|
logger.info(f"Profitable trade! {trade_pnl:.2f}% profit, reward: {reward:.4f}")
|
||||||
|
else:
|
||||||
|
losses += 1
|
||||||
|
logger.info(f"Loss trade! {trade_pnl:.2f}% loss, penalty: {reward:.4f}")
|
||||||
|
|
||||||
|
# Update state and counters
|
||||||
|
state = next_state
|
||||||
|
total_reward += reward
|
||||||
|
step += 1
|
||||||
|
|
||||||
|
# Train agent
|
||||||
|
loss = agent.replay()
|
||||||
|
if loss > 0:
|
||||||
|
episode_losses.append(loss)
|
||||||
|
|
||||||
|
# Log training metrics for each episode
|
||||||
|
episode_time = time.time() - episode_start_time
|
||||||
|
total_rewards.append(total_reward)
|
||||||
|
|
||||||
|
# Calculate win rate
|
||||||
|
win_rate = wins / max(1, (wins + losses))
|
||||||
|
trade_win_rates.append(win_rate)
|
||||||
|
|
||||||
|
# Log to console and TensorBoard
|
||||||
|
logger.info(f"Episode {episode}/{args.episodes} - Reward: {total_reward:.4f}, Win Rate: {win_rate:.2f}, "
|
||||||
|
f"Trades: {len(trades)}, Balance: ${train_env.balance:.2f}, Epsilon: {agent.epsilon:.4f}, "
|
||||||
|
f"Time: {episode_time:.2f}s")
|
||||||
|
|
||||||
|
writer.add_scalar('metrics/reward', total_reward, episode)
|
||||||
|
writer.add_scalar('metrics/balance', train_env.balance, episode)
|
||||||
|
writer.add_scalar('metrics/win_rate', win_rate, episode)
|
||||||
|
writer.add_scalar('metrics/trades', len(trades), episode)
|
||||||
|
writer.add_scalar('metrics/epsilon', agent.epsilon, episode)
|
||||||
|
|
||||||
|
if episode_losses:
|
||||||
|
avg_loss = sum(episode_losses) / len(episode_losses)
|
||||||
|
writer.add_scalar('metrics/loss', avg_loss, episode)
|
||||||
|
|
||||||
|
# Check if this is the best model so far
|
||||||
|
if total_reward > best_reward:
|
||||||
|
best_reward = total_reward
|
||||||
|
# Save best model
|
||||||
|
agent.save(f"NN/models/saved/enhanced_dqn_best")
|
||||||
|
logger.info(f"New best model saved with reward: {best_reward:.4f}")
|
||||||
|
|
||||||
|
# Save checkpoint every 10 episodes
|
||||||
|
if episode % 10 == 0 and episode > 0:
|
||||||
|
agent.save(f"NN/models/saved/enhanced_dqn_checkpoint")
|
||||||
|
logger.info(f"Checkpoint saved at episode {episode}")
|
||||||
|
|
||||||
|
# Reset episode losses
|
||||||
|
episode_losses = []
|
||||||
|
|
||||||
|
# Final save
|
||||||
|
agent.save(f"NN/models/saved/enhanced_dqn_final")
|
||||||
|
logger.info("Enhanced training completed, final model saved")
|
||||||
|
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
logger.info("Training interrupted by user")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Training failed: {str(e)}")
|
||||||
|
import traceback
|
||||||
|
logger.error(traceback.format_exc())
|
||||||
|
finally:
|
||||||
|
# Close TensorBoard writer
|
||||||
|
writer.close()
|
||||||
|
|
||||||
|
return agent, train_env
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Create logs directory if it doesn't exist
|
||||||
|
os.makedirs("logs", exist_ok=True)
|
||||||
|
os.makedirs("NN/models/saved", exist_ok=True)
|
||||||
|
|
||||||
|
# Parse arguments
|
||||||
|
args = parse_args()
|
||||||
|
|
||||||
|
# Start training
|
||||||
|
train_enhanced_rl(args)
|
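Aside: the labeling rule used by generate_price_prediction_training_data above maps a percentage price change to a direction class with a band of plus or minus 0.5%; as a standalone sketch of that rule (thresholds taken from the file, function signature illustrative):

def get_direction(change_pct: float, threshold: float = 0.5) -> int:
    """Return 0 for down, 1 for sideways, 2 for up, based on a % change."""
    if change_pct < -threshold:
        return 0
    if change_pct > threshold:
        return 2
    return 1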
836
NN/train_rl.py
836
NN/train_rl.py
@ -52,7 +52,7 @@ class RLTradingEnvironment(gym.Env):
|
|||||||
Reinforcement Learning environment for trading with technical indicators
|
Reinforcement Learning environment for trading with technical indicators
|
||||||
from multiple timeframes
|
from multiple timeframes
|
||||||
"""
|
"""
|
||||||
def __init__(self, features_1m, features_1h=None, features_1d=None, window_size=20, trading_fee=0.0025, min_trade_interval=15):
|
def __init__(self, features_1m, features_1h, features_1d, window_size=20, trading_fee=0.0025, min_trade_interval=15):
|
||||||
super().__init__()
|
super().__init__()
|
||||||
|
|
||||||
# Initialize attributes before parent class
|
# Initialize attributes before parent class
|
||||||
@ -60,12 +60,7 @@ class RLTradingEnvironment(gym.Env):
|
|||||||
self.num_features = features_1m.shape[1] - 1 # Exclude close price
|
self.num_features = features_1m.shape[1] - 1 # Exclude close price
|
||||||
|
|
||||||
# Count available timeframes
|
# Count available timeframes
|
||||||
self.num_timeframes = 1 # Always have 1m
|
self.num_timeframes = 3 # We require all timeframes now
|
||||||
if features_1h is not None:
|
|
||||||
self.num_timeframes += 1
|
|
||||||
if features_1d is not None:
|
|
||||||
self.num_timeframes += 1
|
|
||||||
|
|
||||||
self.feature_dim = self.num_features * self.num_timeframes
|
self.feature_dim = self.num_features * self.num_timeframes
|
||||||
|
|
||||||
# Store features from different timeframes
|
# Store features from different timeframes
|
||||||
@ -73,16 +68,6 @@ class RLTradingEnvironment(gym.Env):
|
|||||||
self.features_1h = features_1h
|
self.features_1h = features_1h
|
||||||
self.features_1d = features_1d
|
self.features_1d = features_1d
|
||||||
|
|
||||||
# Create synthetic 1s data from 1m (for demo purposes)
|
|
||||||
self.features_1s = self._create_synthetic_1s_data(features_1m)
|
|
||||||
|
|
||||||
# If higher timeframes are missing, create synthetic data
|
|
||||||
if self.features_1h is None:
|
|
||||||
self.features_1h = self._create_synthetic_hourly_data(features_1m)
|
|
||||||
|
|
||||||
if self.features_1d is None:
|
|
||||||
self.features_1d = self._create_synthetic_daily_data(features_1h)
|
|
||||||
|
|
||||||
# Trading parameters
|
# Trading parameters
|
||||||
self.initial_balance = 1.0
|
self.initial_balance = 1.0
|
||||||
self.trading_fee = trading_fee # Increased from 0.001 to 0.0025 (0.25%)
|
self.trading_fee = trading_fee # Increased from 0.001 to 0.0025 (0.25%)
|
||||||
@ -103,45 +88,6 @@ class RLTradingEnvironment(gym.Env):
|
|||||||
# Callback for visualization or external monitoring
|
# Callback for visualization or external monitoring
|
||||||
self.action_callback = None
|
self.action_callback = None
|
||||||
|
|
||||||
def _create_synthetic_1s_data(self, features_1m):
|
|
||||||
"""Create synthetic 1-second data from 1-minute data"""
|
|
||||||
# Simple approach: duplicate each 1m candle for 60 seconds with some noise
|
|
||||||
num_samples = features_1m.shape[0]
|
|
||||||
synthetic_1s = np.zeros((num_samples * 60, features_1m.shape[1]))
|
|
||||||
|
|
||||||
for i in range(num_samples):
|
|
||||||
for j in range(60):
|
|
||||||
idx = i * 60 + j
|
|
||||||
if idx < synthetic_1s.shape[0]:
|
|
||||||
# Copy the 1m data with small random noise
|
|
||||||
synthetic_1s[idx] = features_1m[i] * (1 + np.random.normal(0, 0.0001, features_1m.shape[1]))
|
|
||||||
|
|
||||||
return synthetic_1s
|
|
||||||
|
|
||||||
def _create_synthetic_hourly_data(self, features_1m):
|
|
||||||
"""Create synthetic hourly data from minute data"""
|
|
||||||
# Group by hour, taking every 60th candle
|
|
||||||
num_samples = features_1m.shape[0] // 60
|
|
||||||
synthetic_1h = np.zeros((num_samples, features_1m.shape[1]))
|
|
||||||
|
|
||||||
for i in range(num_samples):
|
|
||||||
if i * 60 < features_1m.shape[0]:
|
|
||||||
synthetic_1h[i] = features_1m[i * 60]
|
|
||||||
|
|
||||||
return synthetic_1h
|
|
||||||
|
|
||||||
def _create_synthetic_daily_data(self, features_1h):
|
|
||||||
"""Create synthetic daily data from hourly data"""
|
|
||||||
# Group by day, taking every 24th candle
|
|
||||||
num_samples = features_1h.shape[0] // 24
|
|
||||||
synthetic_1d = np.zeros((num_samples, features_1h.shape[1]))
|
|
||||||
|
|
||||||
for i in range(num_samples):
|
|
||||||
if i * 24 < features_1h.shape[0]:
|
|
||||||
synthetic_1d[i] = features_1h[i * 24]
|
|
||||||
|
|
||||||
return synthetic_1d
|
|
||||||
|
|
||||||
def reset(self):
|
def reset(self):
|
||||||
"""Reset the environment to initial state"""
|
"""Reset the environment to initial state"""
|
||||||
self.balance = self.initial_balance
|
self.balance = self.initial_balance
|
||||||
@ -208,161 +154,242 @@ class RLTradingEnvironment(gym.Env):
        return combined_features

    def step(self, action):
-        """
-        Take an action in the environment and return the next state, reward, done flag, and info
-
-        Args:
-            action (int): 0 = Buy, 1 = Sell, 2 = Hold
-
-        Returns:
-            tuple: (observation, reward, done, info)
-        """
-        # Get current and next price
-        current_price = self.features_1m[self.current_step, -1]  # Close price is last column
-
-        # Check if we're at the end of the data
-        if self.current_step + 1 >= len(self.features_1m):
-            next_price = current_price  # Use current price if at the end
-            done = True
-        else:
-            next_price = self.features_1m[self.current_step + 1, -1]
-            done = False
-
-        # Handle zero or negative prices
-        if current_price <= 0:
-            current_price = 1e-8  # Small positive number
-
-        if next_price <= 0:
-            next_price = current_price  # Use current price if next price is invalid
-        price_change = (next_price - current_price) / current_price
-
-        # Default reward is slightly negative to discourage inaction
-        reward = -0.0001
-        profit_pct = None  # Initialize profit_pct variable
-
-        # Check if enough time has passed since last trade
-        trade_interval = self.current_step - self.last_trade_step
-        trade_interval_penalty = 0
-
+        """Take an action and return the next state, reward, done flag, and info"""
+        # Initialize info dictionary for additional data
+        info = {
+            'trade_executed': False,
+            'price_change': 0.0,
+            'position_change': 0,
+            'current_price': 0.0,
+            'next_price': 0.0,
+            'balance_change': 0.0,
+            'reward_components': {},
+            'future_prices': {}
+        }
+
+        # Get the current and next price
+        current_price = self.features_1m[self.current_step, -1]
+
+        # Handle edge case at the end of the data
+        if self.current_step >= len(self.features_1m) - 1:
+            next_price = current_price  # Use current price as next price
+            done = True
+        else:
+            next_price = self.features_1m[self.current_step + 1, -1]
+            done = False
+
+        # Handle zero or negative price (data error)
+        if current_price <= 0:
+            current_price = 0.01  # Set to a small positive number
+            logger.warning(f"Zero or negative price detected at step {self.current_step}. Setting to 0.01.")
+
+        if next_price <= 0:
+            next_price = current_price  # Use current price instead
+            logger.warning(f"Zero or negative next price detected at step {self.current_step + 1}. Using current price.")
+
+        # Calculate price change as percentage
+        price_change_pct = ((next_price - current_price) / current_price) * 100
+
+        # Store prices in info
+        info['current_price'] = current_price
+        info['next_price'] = next_price
+        info['price_change'] = price_change_pct
+
# Execute action
|
# Initialize reward components dictionary
|
||||||
if action == 0: # BUY
|
reward_components = {
|
||||||
if self.position == 0: # Only buy if not already in position
|
'holding_reward': 0.0,
|
||||||
# Apply extra penalty for trading too frequently
|
'action_reward': 0.0,
|
||||||
if trade_interval < self.min_trade_interval:
|
'profit_reward': 0.0,
|
||||||
trade_interval_penalty = -0.002 * (self.min_trade_interval - trade_interval)
|
'trade_freq_penalty': 0.0
|
||||||
# Still allow the trade but with penalty
|
}
|
||||||
|
|
||||||
|
# Default small negative reward to discourage inaction
|
||||||
|
reward = -0.01
|
||||||
|
reward_components['holding_reward'] = -0.01
|
||||||
|
|
||||||
|
# Track previous balance for changes
|
||||||
|
previous_balance = self.balance
|
||||||
|
|
||||||
|
# Execute action (0: Buy, 1: Sell, 2: Hold)
|
||||||
|
if action == 0: # Buy
|
||||||
|
if self.position == 0: # Only buy if we don't already have a position
|
||||||
|
# Calculate how much of the asset we can buy with 100% of balance
|
||||||
|
self.position = self.balance / current_price
|
||||||
|
self.balance = 0 # All balance used
|
||||||
|
|
||||||
self.position = self.balance * (1 - self.trading_fee)
|
# If price goes up after buying, that's good
|
||||||
self.balance = 0
|
expected_profit = price_change_pct
|
||||||
self.trades += 1
|
# Scale reward based on expected profit
|
||||||
reward = -0.001 + trade_interval_penalty # Small cost for transaction + potential penalty
|
if expected_profit > 0:
|
||||||
self.trade_entry_price = current_price
|
# Positive reward for profitable buy decision
|
||||||
self.last_trade_step = self.current_step
|
action_reward = 0.1 + (expected_profit * 0.05) # Base reward + profit-based bonus
|
||||||
|
reward_components['action_reward'] = action_reward
|
||||||
elif action == 1: # SELL
|
reward += action_reward
|
||||||
if self.position > 0: # Only sell if in position
|
|
||||||
# Apply extra penalty for trading too frequently
|
|
||||||
if trade_interval < self.min_trade_interval:
|
|
||||||
trade_interval_penalty = -0.002 * (self.min_trade_interval - trade_interval)
|
|
||||||
# Still allow the trade but with penalty
|
|
||||||
|
|
||||||
# Calculate position value at current price
|
|
||||||
position_value = self.position * (1 + price_change)
|
|
||||||
self.balance = position_value * (1 - self.trading_fee)
|
|
||||||
|
|
||||||
# Calculate profit/loss from trade
|
|
||||||
profit_pct = (next_price - self.trade_entry_price) / self.trade_entry_price
|
|
||||||
# Scale reward by profit percentage and apply trade interval penalty
|
|
||||||
reward = (profit_pct * 10) + trade_interval_penalty
|
|
||||||
|
|
||||||
# Update win/loss count
|
|
||||||
if profit_pct > 0:
|
|
||||||
self.wins += 1
|
|
||||||
else:
|
else:
|
||||||
self.losses += 1
|
# Small negative reward for unprofitable buy
|
||||||
|
action_reward = -0.1 + (expected_profit * 0.03) # Smaller penalty for small losses
|
||||||
|
reward_components['action_reward'] = action_reward
|
||||||
|
reward += action_reward
|
||||||
|
|
||||||
# Record trade
|
# Check if we've traded too frequently
|
||||||
|
if len(self.trade_history) > 0:
|
||||||
|
last_trade_step = self.trade_history[-1]['step']
|
||||||
|
if self.current_step - last_trade_step < 5: # If less than 5 steps since last trade
|
||||||
|
freq_penalty = -0.2 # Penalty for trading too frequently
|
||||||
|
reward += freq_penalty
|
||||||
|
reward_components['trade_freq_penalty'] = freq_penalty
|
||||||
|
|
||||||
|
# Record the trade
|
||||||
self.trade_history.append({
|
self.trade_history.append({
|
||||||
'entry_price': self.trade_entry_price,
|
'step': self.current_step,
|
||||||
'exit_price': next_price,
|
'action': 'buy',
|
||||||
'profit_pct': profit_pct,
|
'price': current_price,
|
||||||
'trade_interval': trade_interval
|
'position': self.position,
|
||||||
|
'balance': self.balance
|
||||||
})
|
})
|
||||||
|
|
||||||
# Reset position and update last trade step
|
info['trade_executed'] = True
|
||||||
|
logger.info(f"Buy at step {self.current_step}, price: {current_price:.4f}, position: {self.position:.6f}")
|
||||||
|
|
||||||
|
-                self.position = 0
-                self.last_trade_step = self.current_step
-
+        elif action == 1:  # Sell
+            if self.position > 0:  # Only sell if we have a position
+                # Calculate sale proceeds
+                sale_value = self.position * current_price
+
+                # Calculate profit or loss percentage from last buy
+                last_buy_price = None
+                for trade in reversed(self.trade_history):
+                    if trade['action'] == 'buy':
+                        last_buy_price = trade['price']
+                        break
+
+                # If we found the last buy price, calculate profit
+                if last_buy_price is not None:
+                    profit_pct = ((current_price - last_buy_price) / last_buy_price) * 100
+
+                    # Highly reward profitable trades
+                    if profit_pct > 0:
+                        # Progressive reward based on profit percentage
+                        profit_reward = min(5.0, profit_pct * 0.2)  # Cap at 5.0 to prevent exploitation
+                        reward_components['profit_reward'] = profit_reward
+                        reward += profit_reward
+                        logger.info(f"Profitable trade! {profit_pct:.2f}% profit, reward: {profit_reward:.4f}")
+                    else:
+                        # Penalize losses more heavily based on size of loss
+                        loss_penalty = max(-3.0, profit_pct * 0.15)  # Cap at -3.0 to prevent excessive punishment
+                        reward_components['profit_reward'] = loss_penalty
+                        reward += loss_penalty
+                        logger.info(f"Loss trade! {profit_pct:.2f}% loss, penalty: {loss_penalty:.4f}")
+
+                # If price goes down after selling, that's good
+                if price_change_pct < 0:
+                    # Reward for good timing on sell (avoiding future loss)
+                    timing_reward = min(1.0, abs(price_change_pct) * 0.05)
+                    reward_components['action_reward'] = timing_reward
+                    reward += timing_reward
+
+                # Check for trading too frequently
+                if len(self.trade_history) > 0:
+                    last_trade_step = self.trade_history[-1]['step']
+                    if self.current_step - last_trade_step < 5:  # If less than 5 steps since last trade
+                        freq_penalty = -0.2  # Penalty for trading too frequently
+                        reward += freq_penalty
+                        reward_components['trade_freq_penalty'] = freq_penalty
+
+                # Update balance and position
+                self.balance = sale_value
+                position_change = self.position
+                self.position = 0
+
+                # Record the trade
+                self.trade_history.append({
+                    'step': self.current_step,
+                    'action': 'sell',
+                    'price': current_price,
+                    'position': self.position,
+                    'balance': self.balance
+                })
+
+                info['trade_executed'] = True
+                info['position_change'] = position_change
+                logger.info(f"Sell at step {self.current_step}, price: {current_price:.4f}, new balance: {self.balance:.4f}")
+
+        elif action == 2:  # Hold
+            # Small reward if holding was a good decision
+            if self.position > 0 and price_change_pct > 0:  # Holding long position during price increase
+                hold_reward = price_change_pct * 0.01  # Small reward proportional to price increase
+                reward += hold_reward
+                reward_components['holding_reward'] = hold_reward
+            elif self.position == 0 and price_change_pct < 0:  # Holding cash during price decrease
+                hold_reward = abs(price_change_pct) * 0.01  # Small reward for avoiding loss
+                reward += hold_reward
+                reward_components['holding_reward'] = hold_reward
+
-        # else: (action == 2 - HOLD) - no position change
-
-        # Move to next step
+        # Move to the next step
        self.current_step += 1

-        # Check if done (reached end of data)
+        # Update current portfolio value
+        if self.position > 0:
+            self.current_value = self.balance + (self.position * next_price)
+        else:
+            self.current_value = self.balance
+
+        # Calculate balance change
+        balance_change = self.current_value - previous_balance
+        info['balance_change'] = balance_change
+
+        # Check if we've reached the end of the data
        if self.current_step >= len(self.features_1m) - 1:
            done = True

-            # Apply final evaluation
+            # Final evaluation if we have a position
            if self.position > 0:
-                # Force close position at the end
-                position_value = self.position * (1 + price_change)
-                self.balance = position_value * (1 - self.trading_fee)
-                profit_pct = (next_price - self.trade_entry_price) / self.trade_entry_price
-                reward += profit_pct * 10
-
-                # Update win/loss count
-                if profit_pct > 0:
-                    self.wins += 1
-                else:
-                    self.losses += 1
-
-        # Get the next observation
-        observation = self._get_observation()
-
-        # Calculate metrics for info
-        total_value = self.balance + self.position * next_price
-        gain = (total_value - self.initial_balance) / self.initial_balance
-        self.win_rate = self.wins / max(1, self.trades)
-
-        # Check if we have prediction data for future timeframes
-        future_price_1h = None
-        future_price_1d = None
-
-        # Get hourly index
-        idx_1h = self.current_step // 60
-        if idx_1h + 1 < len(self.features_1h):
-            hourly_close_idx = self.features_1h.shape[1] - 1  # Assuming close is last column
-            current_1h_price = self.features_1h[idx_1h, hourly_close_idx]
-            next_1h_price = self.features_1h[idx_1h + 1, hourly_close_idx]
-            future_price_1h = (next_1h_price - current_1h_price) / current_1h_price
-
-        # Get daily index
-        idx_1d = idx_1h // 24
-        if idx_1d + 1 < len(self.features_1d):
-            daily_close_idx = self.features_1d.shape[1] - 1  # Assuming close is last column
-            current_1d_price = self.features_1d[idx_1d, daily_close_idx]
-            next_1d_price = self.features_1d[idx_1d + 1, daily_close_idx]
-            future_price_1d = (next_1d_price - current_1d_price) / current_1d_price
-
-        info = {
-            'balance': self.balance,
-            'position': self.position,
-            'total_value': total_value,
-            'gain': gain,
-            'trades': self.trades,
-            'win_rate': self.win_rate,
-            'profit_pct': profit_pct if action == 1 and self.position == 0 else None,
-            'current_price': current_price,
-            'next_price': next_price,
-            'future_price_1h': future_price_1h,  # Actual future hourly price change
-            'future_price_1d': future_price_1d  # Actual future daily price change
-        }
-
-        # Call the callback if it exists
-        if self.action_callback:
-            self.action_callback(action, current_price, reward, info)
-
-        return observation, reward, done, info
+                # Sell remaining position at the final price
+                final_balance = self.balance + (self.position * next_price)
+
+                # Calculate final portfolio value and return
+                final_return_pct = ((final_balance - self.initial_balance) / self.initial_balance) * 100
+
+                # Add big reward/penalty based on overall performance
+                performance_reward = final_return_pct * 0.1
+                reward += performance_reward
+                reward_components['final_performance'] = performance_reward
+
+                logger.info(f"Episode ended. Final balance: {final_balance:.4f}, Return: {final_return_pct:.2f}%")
+
+        # Get future prices for evaluation (1-hour and 1-day ahead)
+        info['future_prices'] = {}
+
+        # 1-hour future price if hourly data is available
+        if hasattr(self, 'features_1h') and self.features_1h is not None:
+            # Find the closest hourly data point
+            if self.current_step < len(self.features_1m):
+                current_time = self.current_step  # Use as index for simplicity
+                hourly_idx = min(current_time // 60, len(self.features_1h) - 1)  # Assuming 60 minutes per hour
+                if hourly_idx < len(self.features_1h) - 1:
+                    future_1h_price = self.features_1h[hourly_idx + 1, -1]
+                    info['future_prices']['1h'] = future_1h_price
+
+        # 1-day future price if daily data is available
+        if hasattr(self, 'features_1d') and self.features_1d is not None:
+            # Find the closest daily data point
+            if self.current_step < len(self.features_1m):
+                current_time = self.current_step  # Use as index for simplicity
+                daily_idx = min(current_time // 1440, len(self.features_1d) - 1)  # Assuming 1440 minutes per day
+                if daily_idx < len(self.features_1d) - 1:
+                    future_1d_price = self.features_1d[daily_idx + 1, -1]
+                    info['future_prices']['1d'] = future_1d_price
+
+        # Get next observation
+        next_state = self._get_observation()
+
+        # Store reward components in info
+        info['reward_components'] = reward_components
+
+        # Clip reward to prevent extreme values
+        reward = np.clip(reward, -10.0, 10.0)
+
+        return next_state, reward, done, info

    def set_action_callback(self, callback):
        """
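The rewritten `step()` above accumulates the reward from several components (action timing, realized profit on sells, holding, a trade-frequency penalty) and clips the total to [-10, 10]. The following is a minimal, self-contained sketch of that shaping logic in isolation, assuming simplified inputs; the helper name and arguments are illustrative and not part of the environment's API.

```python
import numpy as np

def shaped_reward(action, position, price_change_pct, profit_pct=None, steps_since_last_trade=10):
    """Illustrative re-creation of the reward components used in step()."""
    components = {'holding_reward': -0.01, 'action_reward': 0.0,
                  'profit_reward': 0.0, 'trade_freq_penalty': 0.0}
    reward = components['holding_reward']  # small default penalty for inaction

    if action == 0 and position == 0:  # buy
        bonus = 0.1 + price_change_pct * 0.05 if price_change_pct > 0 else -0.1 + price_change_pct * 0.03
        components['action_reward'] = bonus
        reward += bonus
    elif action == 1 and position > 0 and profit_pct is not None:  # sell
        pr = min(5.0, profit_pct * 0.2) if profit_pct > 0 else max(-3.0, profit_pct * 0.15)
        components['profit_reward'] = pr
        reward += pr
    elif action == 2:  # hold
        if (position > 0 and price_change_pct > 0) or (position == 0 and price_change_pct < 0):
            hold = abs(price_change_pct) * 0.01
            components['holding_reward'] = hold
            reward += hold

    if action in (0, 1) and steps_since_last_trade < 5:
        components['trade_freq_penalty'] = -0.2
        reward += -0.2

    return float(np.clip(reward, -10.0, 10.0)), components

# Example: a sell that locked in 2.5% profit while the price is about to dip
print(shaped_reward(action=1, position=1.0, price_change_pct=-0.4, profit_pct=2.5))
```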
@ -375,9 +402,9 @@ class RLTradingEnvironment(gym.Env):

def train_rl(env_class=None, num_episodes=5000, max_steps=2000, save_path="NN/models/saved/dqn_agent",
             action_callback=None, episode_callback=None, symbol="BTC/USDT",
-             pretrain_price_prediction_enabled=True, pretrain_epochs=10):
+             pretrain_price_prediction_enabled=False, pretrain_epochs=10):
    """
-    Train a reinforcement learning agent for trading
+    Train a reinforcement learning agent for trading using ONLY real market data

    Args:
        env_class: Optional environment class override
@ -387,34 +414,38 @@ def train_rl(env_class=None, num_episodes=5000, max_steps=2000, save_path="NN/mo
        action_callback: Callback function for monitoring actions
        episode_callback: Callback function for monitoring episodes
        symbol: Trading symbol to use
-        pretrain_price_prediction_enabled: Whether to pre-train price prediction
-        pretrain_epochs: Number of epochs for pre-training
+        pretrain_price_prediction_enabled: DEPRECATED - No longer supported (synthetic data not used)
+        pretrain_epochs: DEPRECATED - No longer supported (synthetic data not used)

    Returns:
        tuple: (trained agent, environment)
    """
    # Load data for the selected symbol
-    data_interface = DataInterface(symbol=symbol, timeframes=['1m', '5m', '15m'])
+    data_interface = DataInterface(symbol=symbol, timeframes=['1m', '5m', '15m', '1h', '1d'])

    try:
        # Try to load data for the requested symbol using get_historical_data method
        data_1m = data_interface.get_historical_data(timeframe='1m', n_candles=5000)
        data_5m = data_interface.get_historical_data(timeframe='5m', n_candles=5000)
        data_15m = data_interface.get_historical_data(timeframe='15m', n_candles=5000)
+        data_1h = data_interface.get_historical_data(timeframe='1h', n_candles=1000)
+        data_1d = data_interface.get_historical_data(timeframe='1d', n_candles=500)

-        if data_1m is None or data_5m is None or data_15m is None:
-            raise FileNotFoundError("Could not retrieve data for specified symbol")
+        if data_1m is None or data_5m is None or data_15m is None or data_1h is None or data_1d is None:
+            raise FileNotFoundError("Could not retrieve all required timeframes data for specified symbol")
    except Exception as e:
-        logger.warning(f"Data for {symbol} not available: {str(e)}. Using default data.")
+        logger.warning(f"Data for {symbol} not available: {str(e)}. Using default cached data.")
        # Try to use cached data if available
        symbol = "BTC/USDT"
-        data_interface = DataInterface(symbol=symbol, timeframes=['1m', '5m', '15m'])
+        data_interface = DataInterface(symbol=symbol, timeframes=['1m', '5m', '15m', '1h', '1d'])
        data_1m = data_interface.get_historical_data(timeframe='1m', n_candles=5000)
        data_5m = data_interface.get_historical_data(timeframe='5m', n_candles=5000)
        data_15m = data_interface.get_historical_data(timeframe='15m', n_candles=5000)
+        data_1h = data_interface.get_historical_data(timeframe='1h', n_candles=1000)
+        data_1d = data_interface.get_historical_data(timeframe='1d', n_candles=500)

-        if data_1m is None or data_5m is None or data_15m is None:
-            logger.error("Failed to retrieve any data. Cannot continue training.")
+        if data_1m is None or data_5m is None or data_15m is None or data_1h is None or data_1d is None:
+            logger.error("Failed to retrieve all required timeframes data. Cannot continue training.")
            raise ValueError("No data available for training")

    # Create features from the data by adding technical indicators and converting to numpy format
@ -447,19 +478,39 @@ def train_rl(env_class=None, num_episodes=5000, max_steps=2000, save_path="NN/mo
        ])
    else:
        features_15m = None

+    if data_1h is not None:
+        data_1h = data_interface.add_technical_indicators(data_1h)
+        # Convert to numpy array with close price as the last column
+        features_1h = np.hstack([
+            data_1h.drop(['timestamp', 'close'], axis=1).values,
+            data_1h['close'].values.reshape(-1, 1)
+        ])
+    else:
+        features_1h = None
+
+    if data_1d is not None:
+        data_1d = data_interface.add_technical_indicators(data_1d)
+        # Convert to numpy array with close price as the last column
+        features_1d = np.hstack([
+            data_1d.drop(['timestamp', 'close'], axis=1).values,
+            data_1d['close'].values.reshape(-1, 1)
+        ])
+    else:
+        features_1d = None
+
    # Check if we have all the required features
-    if features_1m is None or features_5m is None or features_15m is None:
+    if features_1m is None or features_5m is None or features_15m is None or features_1h is None or features_1d is None:
        logger.error("Failed to create features for all timeframes.")
        raise ValueError("Could not create features for training")

    # Create the environment
    if env_class:
        # Use provided environment class
-        env = env_class(features_1m, features_5m, features_15m)
+        env = env_class(features_1m, features_1h, features_1d)
    else:
        # Use the default environment
-        env = RLTradingEnvironment(features_1m, features_5m, features_15m)
+        env = RLTradingEnvironment(features_1m, features_1h, features_1d)

    # Set action callback if provided
    if action_callback:
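The hunk above relies on a simple feature-array convention: technical-indicator columns first, close price as the last column, so the environment can read the close via `features[:, -1]`. Below is a small sketch of that conversion with a toy DataFrame; the column names other than `timestamp` and `close` are assumptions for illustration.

```python
import numpy as np
import pandas as pd

def to_feature_array(candles: pd.DataFrame) -> np.ndarray:
    # candles is assumed to hold 'timestamp', 'close' and indicator columns
    other = candles.drop(['timestamp', 'close'], axis=1).values
    close = candles['close'].values.reshape(-1, 1)
    return np.hstack([other, close])  # close ends up in the last column

df = pd.DataFrame({'timestamp': pd.date_range('2024-01-01', periods=3, freq='h'),
                   'rsi': [45.0, 55.0, 60.0],
                   'close': [42000.0, 42100.0, 42050.0]})
features = to_feature_array(df)
print(features.shape, features[:, -1])  # (3, 2) [42000. 42100. 42050.]
```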
@ -494,29 +545,10 @@ def train_rl(env_class=None, num_episodes=5000, max_steps=2000, save_path="NN/mo
    else:
        logger.info("No existing model found. Starting with a new model.")

-    # Pre-train price prediction if enabled and we have a new model
+    # Remove pre-training code since it used synthetic data
+    # Pre-training with real data would require a separate implementation
    if pretrain_price_prediction_enabled:
-        if not os.path.exists(model_file) or input("Pre-train price prediction? (y/n): ").lower() == 'y':
-            logger.info("Pre-training price prediction capability...")
-            # Attempt to load hourly and daily data for pre-training
-            try:
-                data_interface.add_timeframe('1h')
-                data_interface.add_timeframe('1d')
-
-                # Run pre-training
-                agent = pretrain_price_prediction(
-                    agent=agent,
-                    data_interface=data_interface,
-                    n_epochs=pretrain_epochs,
-                    batch_size=128
-                )
-
-                # Save the pre-trained model
-                agent.save(f"{save_path}_pretrained")
-                logger.info("Pre-trained model saved.")
-            except Exception as e:
-                logger.error(f"Error during pre-training: {e}")
-                logger.warning("Continuing with RL training without pre-training.")
+        logger.warning("Pre-training with synthetic data is no longer supported. Continuing with RL training only.")

    # Create TensorBoard writer
    writer = SummaryWriter(log_dir=f'runs/dqn_{int(time.time())}')
@ -582,8 +614,8 @@ def train_rl(env_class=None, num_episodes=5000, max_steps=2000, save_path="NN/mo
        total_rewards.append(total_reward)

        # Calculate trading metrics
-        win_rate = env.win_rate if hasattr(env, 'win_rate') else 0
-        trades = env.trades if hasattr(env, 'trades') else 0
+        win_rate = env.wins / max(1, env.trades)
+        trades = env.trades

        # Log to TensorBoard
        writer.add_scalar('Reward/Episode', total_reward, episode)
@ -621,379 +653,5 @@ def train_rl(env_class=None, num_episodes=5000, max_steps=2000, save_path="NN/mo
|
|||||||
|
|
||||||
return agent, env
|
return agent, env
|
||||||
|
|
||||||
def generate_price_prediction_training_data(data_1m, data_1h, data_1d, window_size=20):
|
|
||||||
"""
|
|
||||||
Generate labeled training data for price prediction at different timeframes
|
|
||||||
|
|
||||||
Args:
|
|
||||||
data_1m: DataFrame with 1-minute data
|
|
||||||
data_1h: DataFrame with 1-hour data
|
|
||||||
data_1d: DataFrame with 1-day data
|
|
||||||
window_size: Size of the input window
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
tuple: (X, y_immediate, y_midterm, y_longterm, y_values)
|
|
||||||
- X: input features (window sequences)
|
|
||||||
- y_immediate: immediate direction labels (0=down, 1=sideways, 2=up)
|
|
||||||
- y_midterm: mid-term direction labels
|
|
||||||
- y_longterm: long-term direction labels
|
|
||||||
- y_values: actual percentage changes for each timeframe
|
|
||||||
"""
|
|
||||||
logger.info("Generating price prediction training data from historical prices")
|
|
||||||
|
|
||||||
# Prepare data structures
|
|
||||||
X = []
|
|
||||||
y_immediate = [] # 1m
|
|
||||||
y_midterm = [] # 1h
|
|
||||||
y_longterm = [] # 1d
|
|
||||||
y_values = [] # Actual percentage changes
|
|
||||||
|
|
||||||
# Calculate future returns for labeling
|
|
||||||
data_1m['future_return_1m'] = data_1m['close'].pct_change(1).shift(-1) # Next candle
|
|
||||||
data_1m['future_return_10m'] = data_1m['close'].pct_change(10).shift(-10) # Next 10 candles
|
|
||||||
|
|
||||||
# Add indices to align data
|
|
||||||
data_1m['index'] = range(len(data_1m))
|
|
||||||
data_1h['index'] = range(len(data_1h))
|
|
||||||
data_1d['index'] = range(len(data_1d))
|
|
||||||
|
|
||||||
# Define thresholds for direction labels
|
|
||||||
immediate_threshold = 0.0005
|
|
||||||
midterm_threshold = 0.001
|
|
||||||
longterm_threshold = 0.002
|
|
||||||
|
|
||||||
# Loop through 1m data to create training samples
|
|
||||||
max_idx = len(data_1m) - window_size - 10 # Ensure we have future data for labels
|
|
||||||
sample_indices = random.sample(range(window_size, max_idx), min(10000, max_idx - window_size))
|
|
||||||
|
|
||||||
for idx in sample_indices:
|
|
||||||
# Get window of 1m data
|
|
||||||
window_1m = data_1m.iloc[idx-window_size:idx].drop(['timestamp', 'future_return_1m', 'future_return_10m', 'index'], axis=1, errors='ignore')
|
|
||||||
|
|
||||||
# Skip if window contains NaN
|
|
||||||
if window_1m.isnull().values.any():
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Get future returns for labeling
|
|
||||||
future_return_1m = data_1m.iloc[idx]['future_return_1m']
|
|
||||||
future_return_10m = data_1m.iloc[idx]['future_return_10m']
|
|
||||||
|
|
||||||
# Find corresponding row in 1h data (closest timestamp)
|
|
||||||
current_timestamp = data_1m.iloc[idx]['timestamp']
|
|
||||||
|
|
||||||
# Find 1h candle for mid-term prediction
|
|
||||||
if 'timestamp' in data_1h.columns:
|
|
||||||
# Find closest 1h candle
|
|
||||||
closest_1h_idx = data_1h['timestamp'].searchsorted(current_timestamp)
|
|
||||||
if closest_1h_idx >= len(data_1h):
|
|
||||||
closest_1h_idx = len(data_1h) - 1
|
|
||||||
|
|
||||||
# Get future 1h return (next candle)
|
|
||||||
if closest_1h_idx < len(data_1h) - 1:
|
|
||||||
future_return_1h = (data_1h.iloc[closest_1h_idx + 1]['close'] - data_1h.iloc[closest_1h_idx]['close']) / data_1h.iloc[closest_1h_idx]['close']
|
|
||||||
else:
|
|
||||||
future_return_1h = 0
|
|
||||||
else:
|
|
||||||
future_return_1h = future_return_10m # Fallback
|
|
||||||
|
|
||||||
# Find 1d candle for long-term prediction
|
|
||||||
if 'timestamp' in data_1d.columns:
|
|
||||||
# Find closest 1d candle
|
|
||||||
closest_1d_idx = data_1d['timestamp'].searchsorted(current_timestamp)
|
|
||||||
if closest_1d_idx >= len(data_1d):
|
|
||||||
closest_1d_idx = len(data_1d) - 1
|
|
||||||
|
|
||||||
# Get future 1d return (next candle)
|
|
||||||
if closest_1d_idx < len(data_1d) - 1:
|
|
||||||
future_return_1d = (data_1d.iloc[closest_1d_idx + 1]['close'] - data_1d.iloc[closest_1d_idx]['close']) / data_1d.iloc[closest_1d_idx]['close']
|
|
||||||
else:
|
|
||||||
future_return_1d = 0
|
|
||||||
else:
|
|
||||||
future_return_1d = future_return_1h * 2 # Fallback
|
|
||||||
|
|
||||||
# Create direction labels
|
|
||||||
# 0=down, 1=sideways, 2=up
|
|
||||||
|
|
||||||
# Immediate (1m)
|
|
||||||
if future_return_1m > immediate_threshold:
|
|
||||||
immediate_label = 2 # UP
|
|
||||||
elif future_return_1m < -immediate_threshold:
|
|
||||||
immediate_label = 0 # DOWN
|
|
||||||
else:
|
|
||||||
immediate_label = 1 # SIDEWAYS
|
|
||||||
|
|
||||||
# Mid-term (1h)
|
|
||||||
if future_return_1h > midterm_threshold:
|
|
||||||
midterm_label = 2 # UP
|
|
||||||
elif future_return_1h < -midterm_threshold:
|
|
||||||
midterm_label = 0 # DOWN
|
|
||||||
else:
|
|
||||||
midterm_label = 1 # SIDEWAYS
|
|
||||||
|
|
||||||
# Long-term (1d)
|
|
||||||
if future_return_1d > longterm_threshold:
|
|
||||||
longterm_label = 2 # UP
|
|
||||||
elif future_return_1d < -longterm_threshold:
|
|
||||||
longterm_label = 0 # DOWN
|
|
||||||
else:
|
|
||||||
longterm_label = 1 # SIDEWAYS
|
|
||||||
|
|
||||||
# Store data
|
|
||||||
X.append(window_1m.values)
|
|
||||||
y_immediate.append(immediate_label)
|
|
||||||
y_midterm.append(midterm_label)
|
|
||||||
y_longterm.append(longterm_label)
|
|
||||||
y_values.append([future_return_1m, future_return_1h, future_return_1d, future_return_1d * 1.5]) # Add weekly estimate
|
|
||||||
|
|
||||||
# Convert to numpy arrays
|
|
||||||
X = np.array(X)
|
|
||||||
y_immediate = np.array(y_immediate)
|
|
||||||
y_midterm = np.array(y_midterm)
|
|
||||||
y_longterm = np.array(y_longterm)
|
|
||||||
y_values = np.array(y_values)
|
|
||||||
|
|
||||||
logger.info(f"Generated {len(X)} price prediction training samples")
|
|
||||||
|
|
||||||
# Log class distribution
|
|
||||||
for name, y in [("Immediate", y_immediate), ("Mid-term", y_midterm), ("Long-term", y_longterm)]:
|
|
||||||
down = (y == 0).sum()
|
|
||||||
sideways = (y == 1).sum()
|
|
||||||
up = (y == 2).sum()
|
|
||||||
logger.info(f"{name} direction distribution: DOWN={down} ({down/len(y)*100:.1f}%), "
|
|
||||||
f"SIDEWAYS={sideways} ({sideways/len(y)*100:.1f}%), "
|
|
||||||
f"UP={up} ({up/len(y)*100:.1f}%)")
|
|
||||||
|
|
||||||
return X, y_immediate, y_midterm, y_longterm, y_values
|
|
||||||
|
|
||||||
def pretrain_price_prediction(agent, data_interface, n_epochs=10, batch_size=128):
|
|
||||||
"""
|
|
||||||
Pre-train the agent's price prediction capability on historical data
|
|
||||||
|
|
||||||
Args:
|
|
||||||
agent: DQNAgent instance to train
|
|
||||||
data_interface: DataInterface instance for accessing data
|
|
||||||
n_epochs: Number of epochs for training
|
|
||||||
batch_size: Batch size for training
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The agent with pre-trained price prediction capabilities
|
|
||||||
"""
|
|
||||||
logger.info("Starting supervised pre-training of price prediction")
|
|
||||||
|
|
||||||
try:
|
|
||||||
# Load data for all required timeframes
|
|
||||||
data_1m = data_interface.get_historical_data(timeframe='1m', n_candles=10000)
|
|
||||||
data_1h = data_interface.get_historical_data(timeframe='1h', n_candles=1000)
|
|
||||||
data_1d = data_interface.get_historical_data(timeframe='1d', n_candles=500)
|
|
||||||
|
|
||||||
# Check if data is available
|
|
||||||
if data_1m is None:
|
|
||||||
logger.warning("1m data not available for pre-training")
|
|
||||||
return agent
|
|
||||||
|
|
||||||
if data_1h is None:
|
|
||||||
logger.warning("1h data not available, using synthesized data")
|
|
||||||
# Create synthetic 1h data from 1m data
|
|
||||||
data_1h = data_1m.iloc[::60].reset_index(drop=True).copy() # Take every 60th record
|
|
||||||
|
|
||||||
if data_1d is None:
|
|
||||||
logger.warning("1d data not available, using synthesized data")
|
|
||||||
# Create synthetic 1d data from 1h data
|
|
||||||
data_1d = data_1h.iloc[::24].reset_index(drop=True).copy() # Take every 24th record
|
|
||||||
|
|
||||||
# Add technical indicators to all data
|
|
||||||
data_1m = data_interface.add_technical_indicators(data_1m)
|
|
||||||
data_1h = data_interface.add_technical_indicators(data_1h)
|
|
||||||
data_1d = data_interface.add_technical_indicators(data_1d)
|
|
||||||
|
|
||||||
# Generate labeled training data
|
|
||||||
X, y_immediate, y_midterm, y_longterm, y_values = generate_price_prediction_training_data(
|
|
||||||
data_1m, data_1h, data_1d, window_size=20
|
|
||||||
)
|
|
||||||
|
|
||||||
# Split data into training and validation sets
|
|
||||||
from sklearn.model_selection import train_test_split
|
|
||||||
X_train, X_val, y_imm_train, y_imm_val, y_mid_train, y_mid_val, y_long_train, y_long_val, y_val_train, y_val_val = train_test_split(
|
|
||||||
X, y_immediate, y_midterm, y_longterm, y_values, test_size=0.2, random_state=42
|
|
||||||
)
|
|
||||||
|
|
||||||
# Convert to torch tensors
|
|
||||||
X_train_tensor = torch.FloatTensor(X_train).to(agent.device)
|
|
||||||
y_imm_train_tensor = torch.LongTensor(y_imm_train).to(agent.device)
|
|
||||||
y_mid_train_tensor = torch.LongTensor(y_mid_train).to(agent.device)
|
|
||||||
y_long_train_tensor = torch.LongTensor(y_long_train).to(agent.device)
|
|
||||||
y_val_train_tensor = torch.FloatTensor(y_val_train).to(agent.device)
|
|
||||||
|
|
||||||
X_val_tensor = torch.FloatTensor(X_val).to(agent.device)
|
|
||||||
y_imm_val_tensor = torch.LongTensor(y_imm_val).to(agent.device)
|
|
||||||
y_mid_val_tensor = torch.LongTensor(y_mid_val).to(agent.device)
|
|
||||||
y_long_val_tensor = torch.LongTensor(y_long_val).to(agent.device)
|
|
||||||
y_val_val_tensor = torch.FloatTensor(y_val_val).to(agent.device)
|
|
||||||
|
|
||||||
# Calculate class weights for imbalanced data
|
|
||||||
from torch.nn.functional import one_hot
|
|
||||||
|
|
||||||
# Function to calculate class weights
|
|
||||||
def get_class_weights(labels):
|
|
||||||
counts = np.bincount(labels)
|
|
||||||
if len(counts) < 3: # Ensure we have 3 classes
|
|
||||||
counts = np.append(counts, [0] * (3 - len(counts)))
|
|
||||||
weights = 1.0 / np.array(counts)
|
|
||||||
weights = weights / np.sum(weights) # Normalize
|
|
||||||
return weights
|
|
||||||
|
|
||||||
imm_weights = torch.FloatTensor(get_class_weights(y_imm_train)).to(agent.device)
|
|
||||||
mid_weights = torch.FloatTensor(get_class_weights(y_mid_train)).to(agent.device)
|
|
||||||
long_weights = torch.FloatTensor(get_class_weights(y_long_train)).to(agent.device)
|
|
||||||
|
|
||||||
# Create DataLoader for batch training
|
|
||||||
from torch.utils.data import TensorDataset, DataLoader
|
|
||||||
|
|
||||||
train_dataset = TensorDataset(
|
|
||||||
X_train_tensor, y_imm_train_tensor, y_mid_train_tensor,
|
|
||||||
y_long_train_tensor, y_val_train_tensor
|
|
||||||
)
|
|
||||||
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
|
|
||||||
|
|
||||||
# Set up loss functions with class weights
|
|
||||||
imm_criterion = nn.CrossEntropyLoss(weight=imm_weights)
|
|
||||||
mid_criterion = nn.CrossEntropyLoss(weight=mid_weights)
|
|
||||||
long_criterion = nn.CrossEntropyLoss(weight=long_weights)
|
|
||||||
value_criterion = nn.MSELoss()
|
|
||||||
|
|
||||||
# Set up optimizer (separate from agent's optimizer)
|
|
||||||
pretrain_optimizer = torch.optim.Adam(agent.policy_net.parameters(), lr=0.0002)
|
|
||||||
pretrain_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
|
|
||||||
pretrain_optimizer, mode='min', factor=0.5, patience=3, verbose=True
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set model to training mode
|
|
||||||
agent.policy_net.train()
|
|
||||||
|
|
||||||
# Training loop
|
|
||||||
best_val_loss = float('inf')
|
|
||||||
patience = 5
|
|
||||||
patience_counter = 0
|
|
||||||
|
|
||||||
for epoch in range(n_epochs):
|
|
||||||
# Training phase
|
|
||||||
train_loss = 0.0
|
|
||||||
imm_correct, mid_correct, long_correct = 0, 0, 0
|
|
||||||
total = 0
|
|
||||||
|
|
||||||
for X_batch, y_imm_batch, y_mid_batch, y_long_batch, y_val_batch in train_loader:
|
|
||||||
# Zero gradients
|
|
||||||
pretrain_optimizer.zero_grad()
|
|
||||||
|
|
||||||
# Forward pass - we only need the price predictions
|
|
||||||
with torch.cuda.amp.autocast() if agent.use_mixed_precision else contextlib.nullcontext():
|
|
||||||
_, _, price_preds = agent.policy_net(X_batch)
|
|
||||||
|
|
||||||
# Calculate losses for each prediction head
|
|
||||||
imm_loss = imm_criterion(price_preds['immediate'], y_imm_batch)
|
|
||||||
mid_loss = mid_criterion(price_preds['midterm'], y_mid_batch)
|
|
||||||
long_loss = long_criterion(price_preds['longterm'], y_long_batch)
|
|
||||||
value_loss = value_criterion(price_preds['values'], y_val_batch)
|
|
||||||
|
|
||||||
# Combined loss (weighted by importance)
|
|
||||||
total_loss = imm_loss + 0.7 * mid_loss + 0.5 * long_loss + 0.3 * value_loss
|
|
||||||
|
|
||||||
# Backward pass and optimize
|
|
||||||
if agent.use_mixed_precision:
|
|
||||||
agent.scaler.scale(total_loss).backward()
|
|
||||||
agent.scaler.unscale_(pretrain_optimizer)
|
|
||||||
torch.nn.utils.clip_grad_norm_(agent.policy_net.parameters(), 1.0)
|
|
||||||
agent.scaler.step(pretrain_optimizer)
|
|
||||||
agent.scaler.update()
|
|
||||||
else:
|
|
||||||
total_loss.backward()
|
|
||||||
torch.nn.utils.clip_grad_norm_(agent.policy_net.parameters(), 1.0)
|
|
||||||
pretrain_optimizer.step()
|
|
||||||
|
|
||||||
# Accumulate metrics
|
|
||||||
train_loss += total_loss.item()
|
|
||||||
total += X_batch.size(0)
|
|
||||||
|
|
||||||
# Calculate accuracy
|
|
||||||
_, imm_pred = torch.max(price_preds['immediate'], 1)
|
|
||||||
_, mid_pred = torch.max(price_preds['midterm'], 1)
|
|
||||||
_, long_pred = torch.max(price_preds['longterm'], 1)
|
|
||||||
|
|
||||||
imm_correct += (imm_pred == y_imm_batch).sum().item()
|
|
||||||
mid_correct += (mid_pred == y_mid_batch).sum().item()
|
|
||||||
long_correct += (long_pred == y_long_batch).sum().item()
|
|
||||||
|
|
||||||
# Calculate epoch metrics
|
|
||||||
train_loss /= len(train_loader)
|
|
||||||
imm_acc = imm_correct / total
|
|
||||||
mid_acc = mid_correct / total
|
|
||||||
long_acc = long_correct / total
|
|
||||||
|
|
||||||
# Validation phase
|
|
||||||
agent.policy_net.eval()
|
|
||||||
val_loss = 0.0
|
|
||||||
imm_val_correct, mid_val_correct, long_val_correct = 0, 0, 0
|
|
||||||
|
|
||||||
with torch.no_grad():
|
|
||||||
# Forward pass on validation data
|
|
||||||
_, _, val_price_preds = agent.policy_net(X_val_tensor)
|
|
||||||
|
|
||||||
# Calculate validation losses
|
|
||||||
val_imm_loss = imm_criterion(val_price_preds['immediate'], y_imm_val_tensor)
|
|
||||||
val_mid_loss = mid_criterion(val_price_preds['midterm'], y_mid_val_tensor)
|
|
||||||
val_long_loss = long_criterion(val_price_preds['longterm'], y_long_val_tensor)
|
|
||||||
val_value_loss = value_criterion(val_price_preds['values'], y_val_val_tensor)
|
|
||||||
|
|
||||||
val_total_loss = val_imm_loss + 0.7 * val_mid_loss + 0.5 * val_long_loss + 0.3 * val_value_loss
|
|
||||||
val_loss = val_total_loss.item()
|
|
||||||
|
|
||||||
# Calculate validation accuracy
|
|
||||||
_, imm_val_pred = torch.max(val_price_preds['immediate'], 1)
|
|
||||||
_, mid_val_pred = torch.max(val_price_preds['midterm'], 1)
|
|
||||||
_, long_val_pred = torch.max(val_price_preds['longterm'], 1)
|
|
||||||
|
|
||||||
imm_val_correct = (imm_val_pred == y_imm_val_tensor).sum().item()
|
|
||||||
mid_val_correct = (mid_val_pred == y_mid_val_tensor).sum().item()
|
|
||||||
long_val_correct = (long_val_pred == y_long_val_tensor).sum().item()
|
|
||||||
|
|
||||||
imm_val_acc = imm_val_correct / len(X_val_tensor)
|
|
||||||
mid_val_acc = mid_val_correct / len(X_val_tensor)
|
|
||||||
long_val_acc = long_val_correct / len(X_val_tensor)
|
|
||||||
|
|
||||||
# Learning rate scheduling
|
|
||||||
pretrain_scheduler.step(val_loss)
|
|
||||||
|
|
||||||
# Early stopping check
|
|
||||||
if val_loss < best_val_loss:
|
|
||||||
best_val_loss = val_loss
|
|
||||||
patience_counter = 0
|
|
||||||
# Copy policy_net weights to target_net
|
|
||||||
agent.target_net.load_state_dict(agent.policy_net.state_dict())
|
|
||||||
logger.info(f"Saved best model with validation loss: {val_loss:.4f}")
|
|
||||||
else:
|
|
||||||
patience_counter += 1
|
|
||||||
if patience_counter >= patience:
|
|
||||||
logger.info(f"Early stopping triggered after {epoch+1} epochs")
|
|
||||||
break
|
|
||||||
|
|
||||||
# Log progress
|
|
||||||
logger.info(f"Epoch {epoch+1}/{n_epochs}: "
|
|
||||||
f"Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, "
|
|
||||||
f"Imm Acc: {imm_acc:.4f}/{imm_val_acc:.4f}, "
|
|
||||||
f"Mid Acc: {mid_acc:.4f}/{mid_val_acc:.4f}, "
|
|
||||||
f"Long Acc: {long_acc:.4f}/{long_val_acc:.4f}")
|
|
||||||
|
|
||||||
# Set model back to training mode for next epoch
|
|
||||||
agent.policy_net.train()
|
|
||||||
|
|
||||||
logger.info("Price prediction pre-training complete")
|
|
||||||
return agent
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"Error during price prediction pre-training: {str(e)}")
|
|
||||||
import traceback
|
|
||||||
logger.error(traceback.format_exc())
|
|
||||||
return agent
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
train_rl()
|
train_rl()
|
Binary file not shown.
Binary file not shown.
@ -373,7 +373,7 @@ class DataInterface:

        return df_copy

-    def calculate_pnl(self, predictions, actual_prices, position_size=1.0):
+    def calculate_pnl(self, predictions, actual_prices, position_size=1.0, fee_rate=0.0002):
        """
        Robust PnL calculator that handles:
        - Action predictions (0=SELL, 1=HOLD, 2=BUY)
@ -384,6 +384,7 @@ class DataInterface:
            predictions: Array of predicted actions or probabilities
            actual_prices: Array of actual prices (can be 1D or 2D OHLC format)
            position_size: Position size multiplier
+            fee_rate: Trading fee rate (default: 0.0002 for 0.02% per trade)

        Returns:
            tuple: (total_pnl, win_rate, trades)
@ -443,13 +444,33 @@ class DataInterface:
            price_change = (next_price - current_price) / current_price

            if action == 2:  # BUY
-                trade_pnl = price_change * position_size
+                # Calculate raw PnL
+                raw_pnl = price_change * position_size
+
+                # Calculate fees (entry and exit)
+                entry_fee = position_size * fee_rate
+                exit_fee = position_size * (1 + price_change) * fee_rate
+                total_fees = entry_fee + exit_fee
+
+                # Net PnL after fees
+                trade_pnl = raw_pnl - total_fees
+
                trade_type = 'BUY'
-                is_win = price_change > 0
+                is_win = trade_pnl > 0
            elif action == 0:  # SELL
-                trade_pnl = -price_change * position_size
+                # Calculate raw PnL
+                raw_pnl = -price_change * position_size
+
+                # Calculate fees (entry and exit)
+                entry_fee = position_size * fee_rate
+                exit_fee = position_size * (1 - price_change) * fee_rate
+                total_fees = entry_fee + exit_fee
+
+                # Net PnL after fees
+                trade_pnl = raw_pnl - total_fees
+
                trade_type = 'SELL'
-                is_win = price_change < 0
+                is_win = trade_pnl > 0
            else:
                continue  # Invalid action

@ -462,6 +483,8 @@ class DataInterface:
                'entry': current_price,
                'exit': next_price,
                'pnl': trade_pnl,
+                'raw_pnl': price_change * position_size if trade_type == 'BUY' else -price_change * position_size,
+                'fees': total_fees,
                'win': is_win,
                'duration': 1  # In number of candles
            })
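A quick worked example of the fee-adjusted PnL introduced above for a single BUY trade, using the default `fee_rate` of 0.0002 (0.02% per side); the prices are illustrative.

```python
position_size = 1.0
fee_rate = 0.0002
current_price, next_price = 100.0, 101.0

price_change = (next_price - current_price) / current_price   # 0.01
raw_pnl = price_change * position_size                        # 0.01
entry_fee = position_size * fee_rate                          # 0.0002
exit_fee = position_size * (1 + price_change) * fee_rate      # 0.000202
trade_pnl = raw_pnl - (entry_fee + exit_fee)                  # 0.009598
print(round(trade_pnl, 6))  # fees turn a 1% move into ~0.96% net
```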
65  SYNTHETIC_DATA_REMOVAL_SUMMARY.md  Normal file
@ -0,0 +1,65 @@
# Synthetic Data Removal Summary

This document summarizes all changes made to eliminate the use of synthetic data throughout the trading system.

## Files Modified

1. **NN/train_rl.py**
   - Removed `_create_synthetic_1s_data` method
   - Removed `_create_synthetic_hourly_data` method
   - Removed `_create_synthetic_daily_data` method
   - Modified `RLTradingEnvironment` class to require all timeframes as real data
   - Removed fallback to synthetic data when real data is unavailable
   - Eliminated `generate_price_prediction_training_data` function
   - Removed `pretrain_price_prediction` function that used synthetic data
   - Updated `train_rl` function to load all required timeframes

2. **train_rl_with_realtime.py**
   - Updated `EnhancedRLTradingEnvironment` class to require all timeframes
   - Modified `create_enhanced_env` function to load all required timeframes
   - Added prominent warning logs about requiring real market data
   - Fixed imports to accommodate the changes

3. **README_enhanced_trading_model.md**
   - Updated to emphasize that only real market data is supported
   - Listed all required timeframes and their importance
   - Added clear warnings against using synthetic data
   - Updated usage instructions

4. **New files created**
   - **REAL_MARKET_DATA_POLICY.md**: Comprehensive policy document explaining why we only use real market data

## Key Changes in Implementation

1. **Data Requirements**
   - Now explicitly require all timeframes (1m, 5m, 15m, 1h, 1d) as real data
   - Removed all synthetic data generation functionality
   - Added validation to ensure all required timeframes are available

2. **Error Handling**
   - Improved error messages when required data is missing
   - Eliminated synthetic data fallbacks when real data is unavailable
   - Added clear logging to indicate when real data is required

3. **Training Process**
   - Removed pre-training functions that used synthetic data
   - Updated the main training loop to work exclusively with real data
   - Disabled options related to synthetic data generation

## Benefits of These Changes

1. **More Realistic Training**
   - Models now train exclusively on real market patterns and behaviors
   - No risk of learning artificial patterns that don't exist in real markets

2. **Better Performance**
   - Trading strategies are more likely to work in live markets
   - Models develop more realistic expectations about market behavior

3. **Simplified Codebase**
   - Removing the synthetic data generation code reduces complexity
   - Clearer data requirements make the system easier to understand and use

## Conclusion

These changes ensure our trading system works exclusively with real market data, providing more realistic training and better performance in live trading environments. The system now requires all timeframes to be available as real data and will not fall back to synthetic data under any circumstances.
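As noted under "Data Requirements" above, training now validates that every required timeframe is present as real data. Below is a minimal sketch of that kind of check, assuming a `DataInterface`-like loader; `REQUIRED_TIMEFRAMES` and `load_all_timeframes` are illustrative names, not the project's API.

```python
REQUIRED_TIMEFRAMES = {'1m': 5000, '5m': 5000, '15m': 5000, '1h': 1000, '1d': 500}

def load_all_timeframes(data_interface):
    """Load every required timeframe or fail loudly; no synthetic fallback."""
    data, missing = {}, []
    for tf, n_candles in REQUIRED_TIMEFRAMES.items():
        candles = data_interface.get_historical_data(timeframe=tf, n_candles=n_candles)
        if candles is None or len(candles) == 0:
            missing.append(tf)
        else:
            data[tf] = candles
    if missing:
        raise ValueError(f"Missing real market data for timeframes: {missing}")
    return data
```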
69  TODO.md  Normal file
@ -0,0 +1,69 @@
# Trading System Enhancement TODO List

## Implemented Enhancements

1. **Enhanced CNN Architecture**
   - [x] Implemented deeper CNN with residual connections for better feature extraction
   - [x] Added self-attention mechanisms to capture temporal patterns
   - [x] Implemented dueling architecture for more stable Q-value estimation
   - [x] Added more capacity to prediction heads for better confidence estimation

2. **Improved Training Pipeline**
   - [x] Created example sifting dataset to prioritize high-quality training examples
   - [x] Implemented price prediction pre-training to bootstrap learning
   - [x] Lowered confidence threshold to allow more trades (0.4 instead of 0.5)
   - [x] Added better normalization of state inputs

3. **Visualization and Monitoring**
   - [x] Added detailed confidence metrics tracking
   - [x] Implemented TensorBoard logging for pre-training and RL phases
   - [x] Added more comprehensive trading statistics

## Future Enhancements

1. **Model Architecture Improvements**
   - [ ] Experiment with different residual block configurations
   - [ ] Implement Transformer-based models for better sequence handling
   - [ ] Try LSTM/GRU layers to combine with CNN for temporal data
   - [ ] Implement ensemble methods to combine multiple models

2. **Training Process Improvements**
   - [ ] Implement curriculum learning (start with simple patterns, move to complex)
   - [ ] Add adversarial training to make the model more robust
   - [ ] Implement meta-learning approaches for faster adaptation
   - [ ] Expand pre-training to include extrema detection

3. **Trading Strategy Enhancements**
   - [ ] Add position sizing based on confidence levels
   - [ ] Implement risk management constraints
   - [ ] Add support for stop-loss and take-profit mechanisms
   - [ ] Develop adaptive confidence thresholds based on market volatility

4. **Performance Optimizations**
   - [ ] Optimize data loading pipeline for faster training
   - [ ] Implement distributed training for larger models
   - [ ] Profile and optimize inference speed for real-time trading
   - [ ] Optimize memory usage for longer training sessions

5. **Research Directions**
   - [ ] Explore reinforcement learning algorithms beyond DQN (PPO, SAC, A3C)
   - [ ] Research ways to incorporate fundamental data
   - [ ] Investigate transfer learning from pre-trained models
   - [ ] Study methods to interpret model decisions for better trust

## Implementation Timeline

### Short-term (1-2 weeks)
- Run extended training with the enhanced CNN model
- Analyze performance and confidence metrics
- Implement the most promising architectural improvements

### Medium-term (1-2 months)
- Implement position sizing and risk management features
- Add meta-learning capabilities
- Optimize the training pipeline

### Long-term (3+ months)
- Research and implement advanced RL algorithms
- Create an ensemble of specialized models
- Integrate fundamental data analysis
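The "position sizing based on confidence levels" item in the TODO list above is still open; one possible shape for it is sketched here, scaling exposure linearly between a floor and a cap once the model's confidence clears the trading threshold (0.4, per the pipeline change noted earlier). All names and numbers are illustrative, not an existing API.

```python
def position_size_from_confidence(confidence: float,
                                  threshold: float = 0.4,
                                  min_size: float = 0.1,
                                  max_size: float = 1.0) -> float:
    """Map model confidence to a fraction of the maximum position size."""
    if confidence < threshold:
        return 0.0  # below the trading threshold: stay flat
    # map [threshold, 1.0] linearly onto [min_size, max_size]
    scale = (confidence - threshold) / (1.0 - threshold)
    return min_size + scale * (max_size - min_size)

print(position_size_from_confidence(0.35))  # 0.0
print(position_size_from_confidence(0.7))   # 0.55
```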
Binary file not shown.
@ -57,4 +57,11 @@ python train_with_realtime_ticks.py
python NN/train_rl.py
python train_rl_with_realtime.py

python train_rl_with_realtime.py --episodes 2 --no-train --visualize-only

+python train_hybrid_fixed.py --iterations 1000 --sv-epochs 5 --rl-episodes 3 --symbol ETH/USDT --window 24 --batch-size 64 --new-model
501  cache/BTC_USDT_1d_candles.csv  vendored
@ -1,501 +0,0 @@
timestamp,open,high,low,close,volume
|
|
||||||
2023-11-05,35062.06,35380.0,34448.0,35011.88,24528.73376
|
|
||||||
2023-11-06,35011.89,35276.33,34725.9,35046.09,22346.47086
|
|
||||||
2023-11-07,35046.09,35888.0,34523.06,35399.12,38688.73692
|
|
||||||
2023-11-08,35399.13,36106.0,35100.0,35624.72,33401.34137
|
|
||||||
2023-11-09,35624.72,37972.24,35534.05,36701.09,82537.88885
|
|
||||||
2023-11-10,36701.1,37526.0,36324.71,37301.63,43414.04898
|
|
||||||
2023-11-11,37301.63,37408.26,36666.93,37130.0,22984.97235
|
|
||||||
2023-11-12,37129.99,37222.22,36731.1,37064.13,17687.18874
|
|
||||||
2023-11-13,37064.13,37417.99,36333.0,36462.93,32798.18252
|
|
||||||
2023-11-14,36462.93,36744.0,34800.0,35551.19,45503.68416
|
|
||||||
2023-11-15,35551.2,37980.0,35360.0,37858.2,53569.13385
|
|
||||||
2023-11-16,37858.2,37929.54,35500.0,36163.51,47490.39566
|
|
||||||
2023-11-17,36163.51,36800.0,35861.1,36613.92,38283.61112
|
|
||||||
2023-11-18,36613.91,36845.49,36178.58,36568.1,17102.24186
|
|
||||||
2023-11-19,36568.11,37500.0,36384.02,37359.86,21246.34648
|
|
||||||
2023-11-20,37359.85,37750.0,36677.0,37448.78,36022.70291
|
|
||||||
2023-11-21,37448.79,37649.44,35735.0,35741.65,47646.54804
|
|
||||||
2023-11-22,35741.65,37861.1,35632.01,37408.34,45051.30697
|
|
||||||
2023-11-23,37408.35,37653.44,36870.0,37294.28,23827.92882
|
|
||||||
2023-11-24,37294.27,38414.0,37251.51,37713.57,44680.80646
|
|
||||||
2023-11-25,37713.57,37888.0,37591.1,37780.67,11396.14464
|
|
||||||
2023-11-26,37780.67,37814.63,37150.0,37447.43,21264.53723
|
|
||||||
2023-11-27,37447.42,37569.23,36707.0,37242.7,30001.07376
|
|
||||||
2023-11-28,37242.7,38377.0,36868.41,37818.87,37544.46667
|
|
||||||
2023-11-29,37818.88,38450.0,37570.0,37854.64,32994.19107
|
|
||||||
2023-11-30,37854.65,38145.85,37500.0,37723.96,24740.29147
|
|
||||||
2023-12-01,37723.97,38999.0,37615.86,38682.52,43415.66324
|
|
||||||
2023-12-02,38682.51,39717.14,38641.61,39450.35,26696.92161
|
|
||||||
2023-12-03,39450.35,40250.0,39274.86,39972.26,26710.65335
|
|
||||||
2023-12-04,39972.26,42420.0,39972.26,41991.1,79272.33059
|
|
||||||
2023-12-05,41991.1,44488.0,41414.0,44073.32,67490.74644
|
|
||||||
2023-12-06,44073.82,44297.21,43335.28,43762.69,51431.10492
|
|
||||||
2023-12-07,43762.69,44047.33,42821.1,43273.14,47103.26845
|
|
||||||
2023-12-08,43273.15,44700.0,43081.1,44170.99,42900.37556
|
|
||||||
2023-12-09,44171.0,44358.02,43584.51,43713.6,24925.97008
|
|
||||||
2023-12-10,43713.59,44049.0,43563.0,43789.51,18956.61758
|
|
||||||
2023-12-11,43789.5,43804.5,40222.0,41253.4,76663.89804
|
|
||||||
2023-12-12,41253.41,42104.12,40680.0,41492.39,42722.69773
|
|
||||||
2023-12-13,41492.38,43475.2,40555.0,42869.03,45865.99773
|
|
||||||
2023-12-14,42869.03,43420.0,41400.0,43022.26,42047.05709
|
|
||||||
2023-12-15,43022.26,43080.81,41666.0,41940.3,33421.7932
|
|
||||||
2023-12-16,41940.29,42724.43,41605.0,42278.03,24118.85747
|
|
||||||
2023-12-17,42278.02,42424.07,41252.0,41374.65,27722.11452
|
|
||||||
2023-12-18,41374.64,42757.81,40542.93,42657.8,46734.0925
|
|
||||||
2023-12-19,42657.8,43497.0,41811.1,42275.99,40927.86444
|
|
||||||
2023-12-20,42275.99,44283.0,42206.0,43668.93,48710.2947
|
|
||||||
2023-12-21,43668.92,44242.35,43286.72,43861.8,34624.29384
|
|
||||||
2023-12-22,43861.79,44398.26,43412.54,43969.04,32783.19638
|
|
||||||
2023-12-23,43969.04,43988.68,43291.1,43702.16,16557.1293
|
|
||||||
2023-12-24,43702.15,43946.0,42500.0,42991.5,25144.33496
|
|
||||||
2023-12-25,42991.5,43802.32,42720.43,43576.13,27021.23992
|
|
||||||
2023-12-26,43576.12,43592.68,41637.6,42508.93,41010.04282
|
|
||||||
2023-12-27,42508.93,43677.0,42098.69,43428.85,36191.21136
|
|
||||||
2023-12-28,43428.86,43787.57,42241.79,42563.76,35150.52485
|
|
||||||
2023-12-29,42563.76,43111.0,41300.0,42066.95,42597.18912
|
|
||||||
2023-12-30,42066.94,42612.32,41520.3,42140.28,22906.57818
|
|
||||||
2023-12-31,42140.29,42899.0,41965.84,42283.58,23585.91603
|
|
||||||
2024-01-01,42283.58,44184.1,42180.77,44179.55,27174.29903
|
|
||||||
2024-01-02,44179.55,45879.63,44148.34,44946.91,65146.40661
|
|
||||||
2024-01-03,44946.91,45500.0,40750.0,42845.23,81194.55173
|
|
||||||
2024-01-04,42845.23,44729.58,42613.77,44151.1,48038.06334
|
|
||||||
2024-01-05,44151.1,44357.46,42450.0,44145.11,48075.25327
|
|
||||||
2024-01-06,44145.12,44214.42,43397.05,43968.32,17835.06144
|
|
||||||
2024-01-07,43968.32,44480.59,43572.09,43929.02,23023.8508
|
|
||||||
2024-01-08,43929.01,47248.99,43175.0,46951.04,72814.57589
|
|
||||||
2024-01-09,46951.04,47972.0,44748.67,46110.0,69927.66617
|
|
||||||
2024-01-10,46110.0,47695.93,44300.36,46653.99,89911.41203
|
|
||||||
2024-01-11,46654.0,48969.48,45606.06,46339.16,87470.3296
|
|
||||||
2024-01-12,46339.16,46515.53,41500.0,42782.73,86327.93707
|
|
||||||
2024-01-13,42782.74,43257.0,42436.12,42847.99,36118.47464
|
|
||||||
2024-01-14,42847.99,43079.0,41720.0,41732.35,28228.40894
|
|
||||||
2024-01-15,41732.35,43400.43,41718.05,42511.1,40269.89303
|
|
||||||
2024-01-16,42511.1,43578.01,42050.0,43137.95,45045.74589
|
|
||||||
2024-01-17,43137.94,43198.0,42200.69,42776.1,33266.21388
|
|
||||||
2024-01-18,42776.09,42930.0,40683.28,41327.5,43907.51641
|
|
||||||
2024-01-19,41327.51,42196.86,40280.0,41659.03,48342.74559
|
|
||||||
2024-01-20,41659.03,41872.56,41456.3,41696.04,15923.99493
|
|
||||||
2024-01-21,41696.05,41881.39,41500.98,41580.33,11730.16301
|
|
||||||
2024-01-22,41580.32,41689.65,39431.58,39568.02,55426.19911
|
|
||||||
2024-01-23,39568.02,40176.74,38555.0,39897.6,57956.63351
|
|
||||||
2024-01-24,39897.59,40555.0,39484.19,40084.88,39293.82861
|
|
||||||
2024-01-25,40084.89,40300.24,39550.0,39961.09,31022.11853
|
|
||||||
2024-01-26,39961.09,42246.82,39822.52,41823.51,47384.96726
|
|
||||||
2024-01-27,41823.51,42200.0,41394.34,42120.63,16224.41667
|
|
||||||
2024-01-28,42120.63,42842.68,41620.81,42031.06,27294.99838
|
|
||||||
2024-01-29,42031.05,43333.0,41804.88,43302.7,31542.74207
|
|
||||||
2024-01-30,43302.71,43882.36,42683.99,42941.1,37619.24546
|
|
||||||
2024-01-31,42941.1,43745.11,42276.84,42580.0,39871.13688
|
|
||||||
2024-02-01,42580.0,43285.13,41884.28,43082.94,35231.04664
|
|
||||||
2024-02-02,43082.95,43488.0,42546.79,43200.0,29672.14418
|
|
||||||
2024-02-03,43199.99,43380.01,42880.0,43011.09,12033.40998
|
|
||||||
2024-02-04,43011.1,43119.04,42222.0,42582.88,17066.89404
|
|
||||||
2024-02-05,42582.88,43569.76,42258.1,42708.7,29467.75905
|
|
||||||
2024-02-06,42708.7,43399.98,42574.0,43098.95,24675.85433
|
|
||||||
2024-02-07,43098.96,44396.5,42788.0,44349.6,34392.59915
|
|
||||||
2024-02-08,44349.6,45614.3,44331.1,45288.65,45439.62231
|
|
||||||
2024-02-09,45288.66,48200.0,45242.12,47132.77,73503.481
|
|
||||||
2024-02-10,47132.78,48170.0,46800.0,47751.09,24802.35936
|
|
||||||
2024-02-11,47751.08,48592.66,47557.16,48299.99,29958.80837
|
|
||||||
2024-02-12,48300.0,50334.82,47710.01,49917.27,59009.96705
|
|
||||||
2024-02-13,49917.28,50368.61,48300.95,49699.59,55551.56706
|
|
||||||
2024-02-14,49699.6,52043.71,49225.01,51795.17,57046.37401
|
|
||||||
2024-02-15,51795.17,52816.62,51314.0,51880.0,53816.03055
|
|
||||||
2024-02-16,51880.01,52572.08,51566.0,52124.11,37772.25318
|
|
||||||
2024-02-17,52124.1,52162.82,50625.0,51642.64,25674.00622
|
|
||||||
2024-02-18,51642.64,52377.0,51163.28,52137.67,21992.10363
|
|
||||||
2024-02-19,52137.68,52488.77,51677.0,51774.73,29534.99432
|
|
||||||
2024-02-20,51774.74,52985.0,50760.37,52258.82,49614.47318
|
|
||||||
2024-02-21,52258.82,52366.8,50625.0,51849.39,43079.40049
|
|
||||||
2024-02-22,51849.38,52065.78,50940.78,51288.42,35309.44574
|
|
||||||
2024-02-23,51288.42,51548.54,50521.0,50744.15,30545.79544
|
|
||||||
2024-02-24,50744.15,51698.0,50585.0,51568.22,16560.4211
|
|
||||||
2024-02-25,51568.21,51958.55,51279.8,51728.85,18721.63159
|
|
||||||
2024-02-26,51728.85,54910.0,50901.44,54476.47,51256.72199
|
|
||||||
2024-02-27,54476.48,57588.15,54450.13,57037.34,67194.98562
|
|
||||||
2024-02-28,57037.35,64000.0,56691.85,62432.1,118763.46984
|
|
||||||
2024-02-29,62432.11,63676.35,60364.7,61130.98,78425.07603
|
|
||||||
2024-03-01,61130.99,63114.23,60777.0,62387.9,47737.93473
|
|
||||||
2024-03-02,62387.9,62433.19,61561.12,61987.28,25534.73659
|
|
||||||
2024-03-03,61987.28,63231.88,61320.0,63113.97,28994.90903
|
|
||||||
2024-03-04,63113.97,68499.0,62300.0,68245.71,84835.16005
|
|
||||||
2024-03-05,68245.71,69000.0,59005.0,63724.01,132696.7813
|
|
||||||
2024-03-06,63724.01,67641.1,62779.14,66074.04,78738.85491
|
|
||||||
2024-03-07,66074.04,67980.0,65551.0,66823.17,53059.8869
|
|
||||||
2024-03-08,66823.18,69990.0,66082.66,68124.19,74261.932842
|
|
||||||
2024-03-09,68124.2,68541.1,67861.1,68313.27,19872.89743
|
|
||||||
2024-03-10,68313.28,69887.61,68094.75,68955.88,38404.66835
|
|
||||||
2024-03-11,68955.88,72800.0,67024.96,72078.1,75292.825726
|
|
||||||
2024-03-12,72078.1,73000.0,68620.82,71452.01,68783.546691
|
|
||||||
2024-03-13,71452.0,73650.25,71333.31,73072.41,52659.711647
|
|
||||||
2024-03-14,73072.4,73777.0,68555.0,71388.94,71757.628746
|
|
||||||
2024-03-15,71388.94,72419.71,65600.0,69499.85,103334.03546
|
|
||||||
2024-03-16,69499.84,70043.0,64780.0,65300.63,55926.95336
|
|
||||||
2024-03-17,65300.64,68904.4,64533.0,68393.48,49742.21589
|
|
||||||
2024-03-18,68393.47,68956.0,66565.2,67609.99,55691.08088
|
|
||||||
2024-03-19,67610.0,68124.11,61555.0,61937.4,101005.32487
|
|
||||||
2024-03-20,61937.41,68100.0,60775.0,67840.51,90420.58592
|
|
||||||
2024-03-21,67840.51,68240.47,64529.01,65501.27,53357.48002
|
|
||||||
2024-03-22,65501.28,66649.62,62260.0,63796.64,51482.37821
|
|
||||||
2024-03-23,63796.64,65999.0,63000.0,63990.01,26410.11409
|
|
||||||
2024-03-24,63990.02,67628.69,63772.29,67209.99,31395.78015
|
|
||||||
2024-03-25,67210.0,71150.0,66385.06,69880.01,53431.14486
|
|
||||||
2024-03-26,69880.0,71561.1,69280.0,69988.0,38934.38417
|
|
||||||
2024-03-27,69987.99,71769.54,68359.18,69469.99,49119.35685
|
|
||||||
2024-03-28,69469.99,71552.06,68903.62,70780.6,35439.03239
|
|
||||||
2024-03-29,70780.6,70916.16,69009.0,69850.54,25445.08353
|
|
||||||
2024-03-30,69850.53,70321.1,69540.0,69582.18,13644.61142
|
|
||||||
2024-03-31,69582.17,71366.0,69562.99,71280.01,19396.34433
|
|
||||||
2024-04-01,71280.0,71288.23,68062.86,69649.8,41445.32039
|
|
||||||
2024-04-02,69649.81,69674.23,64550.0,65463.99,71799.82793
|
|
||||||
2024-04-03,65463.99,66903.63,64493.07,65963.28,39887.21778
|
|
||||||
2024-04-04,65963.27,69309.91,65064.52,68487.79,41510.48453
|
|
||||||
2024-04-05,68487.8,68756.67,65952.56,67820.62,37915.23073
|
|
||||||
2024-04-06,67820.63,69692.0,67447.83,68896.0,20134.28919
|
|
||||||
2024-04-07,68896.0,70326.29,68824.0,69360.39,21534.74433
|
|
||||||
2024-04-08,69360.38,72797.99,69043.24,71620.0,45723.87624
|
|
||||||
2024-04-09,71620.0,71758.19,68210.0,69146.0,39293.90242
|
|
||||||
2024-04-10,69146.0,71172.08,67518.0,70631.08,42006.02377
|
|
||||||
2024-04-11,70631.08,71305.89,69567.21,70006.23,31917.25595
|
|
||||||
2024-04-12,70006.22,71227.46,65086.86,67116.52,56072.86229
|
|
||||||
2024-04-13,67116.52,67929.0,60660.57,63924.51,71395.22019
|
|
||||||
2024-04-14,63924.52,65840.0,62134.0,65661.84,61599.17818
|
|
||||||
2024-04-15,65661.85,66867.07,62274.4,63419.99,52389.53069
|
|
||||||
2024-04-16,63419.99,64365.0,61600.0,63793.39,53435.29331
|
|
||||||
2024-04-17,63793.4,64499.0,59678.16,61277.37,50610.54509
|
|
||||||
2024-04-18,61277.38,64117.09,60803.35,63470.08,43601.60918
|
|
||||||
2024-04-19,63470.09,65450.0,59600.01,63818.01,69774.30271
|
|
||||||
2024-04-20,63818.01,65419.0,63090.07,64940.59,23137.42975
|
|
||||||
2024-04-21,64940.58,65695.56,64237.5,64941.15,19316.42152
|
|
||||||
2024-04-22,64941.15,67232.35,64500.0,66819.32,31397.99371
|
|
||||||
2024-04-23,66819.32,67183.01,65765.81,66414.0,22599.90004
|
|
||||||
2024-04-24,66414.0,67070.43,63606.06,64289.59,33595.69637
|
|
||||||
2024-04-25,64289.58,65297.94,62794.0,64498.34,31341.46338
|
|
||||||
2024-04-26,64498.33,64820.01,63297.48,63770.01,27085.19346
|
|
||||||
2024-04-27,63770.0,63923.41,62391.24,63461.98,20933.06052
|
|
||||||
2024-04-28,63461.98,64370.0,62781.0,63118.62,16949.20005
|
|
||||||
2024-04-29,63118.62,64228.35,61765.53,63866.0,28150.22947
|
|
||||||
2024-04-30,63866.0,64734.0,59191.6,60672.0,54947.65535
|
|
||||||
2024-05-01,60672.01,60841.63,56552.82,58364.97,81166.46823
|
|
||||||
2024-05-02,58364.97,59625.0,56911.84,59060.61,47583.81961
|
|
||||||
2024-05-03,59060.6,63333.0,58811.32,62882.01,43628.40143
|
|
||||||
2024-05-04,62882.01,64540.0,62541.03,63892.04,24368.69282
|
|
||||||
2024-05-05,63892.03,64646.0,62822.17,64012.0,18526.75029
|
|
||||||
2024-05-06,64012.0,65500.0,62700.0,63165.19,34674.91949
|
|
||||||
2024-05-07,63165.18,64422.41,62261.0,62312.08,25598.79472
|
|
||||||
2024-05-08,62312.07,63020.22,60888.0,61193.03,26121.19004
|
|
||||||
2024-05-09,61193.03,63429.03,60630.0,63074.01,30660.8061
|
|
||||||
2024-05-10,63074.0,63469.13,60187.12,60799.99,36529.34025
|
|
||||||
2024-05-11,60799.99,61515.0,60487.09,60825.99,13374.56936
|
|
||||||
2024-05-12,60825.99,61888.0,60610.0,61483.99,12753.13236
|
|
||||||
2024-05-13,61484.0,63450.0,60749.21,62940.08,32733.41839
|
|
||||||
2024-05-14,62940.09,63118.36,61142.77,61577.49,29088.72041
|
|
||||||
2024-05-15,61577.49,66444.16,61319.47,66206.5,43559.74719
|
|
||||||
2024-05-16,66206.51,66752.01,64602.77,65235.21,31106.3671
|
|
||||||
2024-05-17,65235.21,67451.2,65106.38,67024.0,26292.23409
|
|
||||||
2024-05-18,67024.0,67400.01,66600.0,66915.2,14441.25774
|
|
||||||
2024-05-19,66915.2,67700.0,65857.25,66274.01,18025.30409
|
|
||||||
2024-05-20,66274.0,71515.56,66060.31,71446.62,50816.7011
|
|
||||||
2024-05-21,71446.62,71979.0,69162.94,70148.34,49607.4336
|
|
||||||
2024-05-22,70148.34,70666.0,68842.19,69166.62,27673.18026
|
|
||||||
2024-05-23,69166.62,70096.12,66312.16,67969.65,40513.17374
|
|
||||||
2024-05-24,67969.66,69250.0,66600.12,68549.99,28095.83664
|
|
||||||
2024-05-25,68549.99,69610.0,68500.0,69290.57,12130.39418
|
|
||||||
2024-05-26,69290.56,69562.23,68128.01,68507.67,11872.11797
|
|
||||||
2024-05-27,68507.67,70687.56,68250.0,69436.43,23136.92737
|
|
||||||
2024-05-28,69436.43,69591.81,67277.91,68398.39,32622.97042
|
|
||||||
2024-05-29,68398.4,68935.68,67124.65,67652.42,23159.83149
|
|
||||||
2024-05-30,67652.41,69500.0,67128.0,68352.17,28478.2184
|
|
||||||
2024-05-31,68352.17,69044.1,66670.0,67540.01,26690.32184
|
|
||||||
2024-06-01,67540.01,67900.0,67428.44,67766.85,8837.66133
|
|
||||||
2024-06-02,67766.84,68460.0,67257.47,67765.63,15426.32529
|
|
||||||
2024-06-03,67765.62,70288.0,67612.48,68809.9,29633.374
|
|
||||||
2024-06-04,68809.89,71063.45,68567.32,70537.84,29619.78489
|
|
||||||
2024-06-05,70537.83,71758.0,70383.66,71108.0,28703.18082
|
|
||||||
2024-06-06,71108.0,71700.0,70117.64,70799.06,21842.00449
|
|
||||||
2024-06-07,70799.06,71997.02,68420.0,69355.6,35598.45045
|
|
||||||
2024-06-08,69355.6,69582.2,69168.02,69310.46,9773.82967
|
|
||||||
2024-06-09,69310.46,69857.14,69130.24,69648.14,9890.56709
|
|
||||||
2024-06-10,69648.15,70195.94,69172.29,69540.0,17122.66941
|
|
||||||
2024-06-11,69540.0,69590.01,66051.0,67314.24,41436.01588
|
|
||||||
2024-06-12,67314.23,69999.0,66905.0,68263.99,37175.32356
|
|
||||||
2024-06-13,68263.98,68449.3,66251.78,66773.01,29079.55571
|
|
||||||
2024-06-14,66773.01,67370.24,65078.0,66043.99,28408.18797
|
|
||||||
2024-06-15,66043.99,66478.48,65857.1,66228.25,11451.80242
|
|
||||||
2024-06-16,66228.25,66998.7,66034.5,66676.87,9392.52223
|
|
||||||
2024-06-17,66676.86,67298.81,65130.0,66504.33,27386.16851
|
|
||||||
2024-06-18,66504.33,66588.23,64060.0,65175.32,42350.10244
|
|
||||||
2024-06-19,65175.32,65727.54,64666.0,64974.37,20060.79576
|
|
||||||
2024-06-20,64974.37,66482.94,64559.15,64869.99,24265.29031
|
|
||||||
2024-06-21,64869.99,65066.66,63379.35,64143.56,25993.56442
|
|
||||||
2024-06-22,64143.56,64546.81,63943.82,64262.01,7308.95542
|
|
||||||
2024-06-23,64262.01,64521.0,63178.32,63210.01,8224.45447
|
|
||||||
2024-06-24,63210.01,63369.8,58402.0,60293.3,52161.35414
|
|
||||||
2024-06-25,60293.3,62420.0,60257.06,61806.01,31189.24361
|
|
||||||
2024-06-26,61806.01,62487.81,60712.0,60864.99,22485.66463
|
|
||||||
2024-06-27,60864.98,62389.22,60606.63,61706.47,18344.28631
|
|
||||||
2024-06-28,61706.46,62225.31,60063.0,60427.84,24821.19255
|
|
||||||
2024-06-29,60427.84,61224.0,60383.77,60986.68,11509.55904
|
|
||||||
2024-06-30,60986.68,63058.76,60712.21,62772.01,17326.30136
|
|
||||||
2024-07-01,62772.01,63861.76,62497.2,62899.99,24547.10538
|
|
||||||
2024-07-02,62900.0,63288.83,61806.28,62135.47,18573.11875
|
|
||||||
2024-07-03,62135.46,62285.94,59400.0,60208.58,32160.11127
|
|
||||||
2024-07-04,60208.57,60498.19,56771.0,57050.01,54568.77276
|
|
||||||
2024-07-05,57050.02,57546.0,53485.93,56628.79,81348.24756
|
|
||||||
2024-07-06,56628.79,58475.0,56018.0,58230.13,21651.31558
|
|
||||||
2024-07-07,58230.13,58449.46,55724.37,55857.81,19118.93918
|
|
||||||
2024-07-08,55857.81,58236.73,54260.16,56714.62,48090.2049
|
|
||||||
2024-07-09,56714.61,58296.0,56289.45,58050.0,27732.20788
|
|
||||||
2024-07-10,58050.0,59470.0,57157.79,57725.85,24951.73799
|
|
||||||
2024-07-11,57725.85,59650.0,57050.0,57339.89,29761.05735
|
|
||||||
2024-07-12,57339.89,58526.68,56542.47,57889.1,23652.4569
|
|
||||||
2024-07-13,57889.09,59850.0,57756.63,59204.02,15357.74519
|
|
||||||
2024-07-14,59204.01,61420.69,59194.01,60797.91,21178.33907
|
|
||||||
2024-07-15,60797.91,64900.0,60632.3,64724.14,38690.9782
|
|
||||||
2024-07-16,64724.06,65388.97,62373.24,65043.99,42530.52915
|
|
||||||
2024-07-17,65044.0,66128.63,63854.0,64087.99,29567.52954
|
|
||||||
2024-07-18,64087.99,65133.3,63238.48,63987.92,22568.7225
|
|
||||||
2024-07-19,63987.92,67386.0,63300.67,66660.0,35634.72739
|
|
||||||
2024-07-20,66660.01,67598.0,66222.46,67139.96,14386.92434
|
|
||||||
2024-07-21,67139.97,68366.66,65777.0,68165.34,21819.11191
|
|
||||||
2024-07-22,68165.35,68474.55,66559.97,67532.01,21451.04303
|
|
||||||
2024-07-23,67532.0,67750.98,65441.08,65936.01,31406.15316
|
|
||||||
2024-07-24,65936.0,67102.01,65111.0,65376.0,23082.56277
|
|
||||||
2024-07-25,65376.01,66175.49,63456.7,65799.95,35126.42934
|
|
||||||
2024-07-26,65799.95,68200.0,65722.63,67907.99,24244.36023
|
|
||||||
2024-07-27,67908.0,69399.99,66650.0,67896.5,31710.21921
|
|
||||||
2024-07-28,67896.49,68318.43,67066.66,68249.88,10868.69394
|
|
||||||
2024-07-29,68249.88,70079.99,66428.0,66784.69,36467.29633
|
|
||||||
2024-07-30,66784.68,67000.0,65302.67,66188.0,23132.25441
|
|
||||||
2024-07-31,66188.0,66849.24,64530.0,64628.0,22625.43905
|
|
||||||
2024-08-01,64628.01,65659.78,62302.0,65354.02,35542.26854
|
|
||||||
2024-08-02,65354.02,65596.14,61230.01,61498.33,38820.42937
|
|
||||||
2024-08-03,61498.34,62198.22,59850.0,60697.99,28034.71567
|
|
||||||
2024-08-04,60697.99,61117.63,57122.77,58161.0,31616.52003
|
|
||||||
2024-08-05,58161.0,58305.59,49000.0,54018.81,162065.59186
|
|
||||||
2024-08-06,54018.82,57040.99,53950.0,56022.01,55884.77676
|
|
||||||
2024-08-07,56022.0,57736.05,54558.62,55134.16,44269.37684
|
|
||||||
2024-08-08,55133.76,62745.14,54730.0,61685.99,48349.52949
|
|
||||||
2024-08-09,61686.0,61744.37,59535.0,60837.99,30972.48017
|
|
||||||
2024-08-10,60837.99,61470.58,60242.0,60923.51,9995.20621
|
|
||||||
2024-08-11,60923.51,61858.0,58286.73,58712.59,19189.84512
|
|
||||||
2024-08-12,58712.59,60711.09,57642.21,59346.64,37009.91743
|
|
||||||
2024-08-13,59346.64,61578.1,58392.88,60587.15,27858.95851
|
|
||||||
2024-08-14,60587.16,61800.0,58433.18,58683.39,28422.76326
|
|
||||||
2024-08-15,58683.39,59849.38,56078.54,57541.06,37686.17622
|
|
||||||
2024-08-16,57541.05,59817.76,57098.62,58874.6,27610.84344
|
|
||||||
2024-08-17,58874.59,59700.0,58785.05,59491.99,7721.72931
|
|
||||||
2024-08-18,59491.99,60284.99,58408.92,58427.35,13634.85717
|
|
||||||
2024-08-19,58427.35,59617.63,57787.3,59438.5,22809.31251
|
|
||||||
2024-08-20,59438.5,61400.0,58548.23,59013.8,31477.44548
|
|
||||||
2024-08-21,59013.8,61820.93,58783.47,61156.03,27983.6422
|
|
||||||
2024-08-22,61156.03,61400.0,59724.87,60375.84,21241.20588
|
|
||||||
2024-08-23,60375.83,64955.0,60342.14,64037.24,38118.07089
|
|
||||||
2024-08-24,64037.24,64494.5,63531.0,64157.01,15857.15616
|
|
||||||
2024-08-25,64157.02,65000.0,63773.27,64220.0,12305.47977
|
|
||||||
2024-08-26,64219.99,64481.0,62800.0,62834.0,19470.05276
|
|
||||||
2024-08-27,62834.0,63212.0,58034.01,59415.0,35135.94178
|
|
||||||
2024-08-28,59415.0,60234.98,57860.0,59034.9,36868.54275
|
|
||||||
2024-08-29,59034.9,61166.99,58713.09,59359.01,27020.90743
|
|
||||||
2024-08-30,59359.0,59944.07,57701.1,59123.99,28519.32195
|
|
||||||
2024-08-31,59123.99,59462.38,58744.0,58973.99,8798.409
|
|
||||||
2024-09-01,58974.0,59076.59,57201.0,57301.86,20705.15741
|
|
||||||
2024-09-02,57301.77,59425.69,57128.0,59132.13,22895.01461
|
|
||||||
2024-09-03,59132.12,59809.65,57415.0,57487.73,22828.18447
|
|
||||||
2024-09-04,57487.74,58519.0,55606.0,57970.9,35560.82146
|
|
||||||
2024-09-05,57970.9,58327.07,55643.65,56180.0,27806.91413
|
|
||||||
2024-09-06,56180.0,57008.0,52550.0,53962.97,54447.76826
|
|
||||||
2024-09-07,53962.97,54850.0,53745.54,54160.86,16694.04774
|
|
||||||
2024-09-08,54160.86,55318.0,53629.01,54869.95,16274.14779
|
|
||||||
2024-09-09,54869.95,58088.0,54591.96,57042.0,32384.51737
|
|
||||||
2024-09-10,57042.01,58044.36,56386.4,57635.99,23626.78126
|
|
||||||
2024-09-11,57635.99,57981.71,55545.19,57338.0,33026.56757
|
|
||||||
2024-09-12,57338.0,58588.0,57324.0,58132.32,31074.40631
|
|
||||||
2024-09-13,58132.31,60625.0,57632.62,60498.0,29825.23333
|
|
||||||
2024-09-14,60497.99,60610.45,59400.0,59993.03,12137.90901
|
|
||||||
2024-09-15,59993.02,60395.8,58691.05,59132.0,13757.92361
|
|
||||||
2024-09-16,59132.0,59210.7,57493.3,58213.99,26477.5642
|
|
||||||
2024-09-17,58213.99,61320.0,57610.01,60313.99,33116.25878
|
|
||||||
2024-09-18,60313.99,61786.24,59174.8,61759.99,36087.02469
|
|
||||||
2024-09-19,61759.98,63850.0,61555.0,62947.99,34332.52608
|
|
||||||
2024-09-20,62948.0,64133.32,62350.0,63201.05,25466.37794
|
|
||||||
2024-09-21,63201.05,63559.9,62758.0,63348.96,8375.34608
|
|
||||||
2024-09-22,63348.97,64000.0,62357.93,63578.76,14242.19892
|
|
||||||
2024-09-23,63578.76,64745.88,62538.75,63339.99,24078.05287
|
|
||||||
2024-09-24,63339.99,64688.0,62700.0,64262.7,23185.04759
|
|
||||||
2024-09-25,64262.7,64817.99,62947.08,63152.01,17813.11168
|
|
||||||
2024-09-26,63152.01,65839.0,62670.0,65173.99,28373.30593
|
|
||||||
2024-09-27,65173.99,66498.0,64819.9,65769.95,22048.80487
|
|
||||||
2024-09-28,65769.95,66260.0,65422.23,65858.0,9127.23316
|
|
||||||
2024-09-29,65858.0,66076.12,65432.0,65602.01,8337.74111
|
|
||||||
2024-09-30,65602.01,65618.8,62856.3,63327.59,30011.08752
|
|
||||||
2024-10-01,63327.6,64130.63,60164.0,60805.78,43671.48108
|
|
||||||
2024-10-02,60804.92,62390.31,60000.0,60649.28,31534.70118
|
|
||||||
2024-10-03,60649.27,61477.19,59828.11,60752.71,26221.43472
|
|
||||||
2024-10-04,60752.72,62484.85,60459.9,62086.0,21294.65994
|
|
||||||
2024-10-05,62086.0,62370.56,61689.26,62058.0,7807.46141
|
|
||||||
2024-10-06,62058.01,62975.0,61798.97,62819.91,8906.86177
|
|
||||||
2024-10-07,62819.91,64478.19,62128.0,62224.0,25966.1852
|
|
||||||
2024-10-08,62224.01,63200.0,61860.31,62160.49,19702.22371
|
|
||||||
2024-10-09,62160.5,62543.75,60301.0,60636.02,20011.15684
|
|
||||||
2024-10-10,60636.01,61321.68,58946.0,60326.39,23967.92481
|
|
||||||
2024-10-11,60326.4,63417.56,60087.64,62540.0,23641.35209
|
|
||||||
2024-10-12,62539.99,63480.0,62487.23,63206.22,10911.30116
|
|
||||||
2024-10-13,63206.23,63285.72,62050.0,62870.02,11909.21995
|
|
||||||
2024-10-14,62870.02,66500.0,62457.81,66083.99,37669.95222
|
|
||||||
2024-10-15,66084.0,67950.0,64800.01,67074.14,43683.95423
|
|
||||||
2024-10-16,67074.14,68424.0,66750.49,67620.01,29938.25544
|
|
||||||
2024-10-17,67620.0,67939.4,66666.0,67421.78,25328.22861
|
|
||||||
2024-10-18,67421.78,69000.0,67192.36,68428.0,28725.635
|
|
||||||
2024-10-19,68427.99,68693.26,68010.0,68378.0,8193.66737
|
|
||||||
2024-10-20,68377.99,69400.0,68100.0,69031.99,12442.47378
|
|
||||||
2024-10-21,69032.0,69519.52,66840.67,67377.5,31374.42184
|
|
||||||
2024-10-22,67377.5,67836.01,66571.42,67426.0,24598.96268
|
|
||||||
2024-10-23,67426.01,67472.83,65260.0,66668.65,25530.2407
|
|
||||||
2024-10-24,66668.65,68850.0,66510.0,68198.28,22589.83877
|
|
||||||
2024-10-25,68198.27,68771.49,65596.29,66698.33,34479.71125
|
|
||||||
2024-10-26,66698.32,67454.55,66439.9,67092.76,11842.9077
|
|
||||||
2024-10-27,67092.76,68332.05,66913.73,68021.7,8653.19592
|
|
||||||
2024-10-28,68021.69,70270.0,67618.0,69962.21,29046.75459
|
|
||||||
2024-10-29,69962.21,73620.12,69760.0,72736.42,50128.60594
|
|
||||||
2024-10-30,72736.41,72961.0,71436.0,72344.74,26885.99056
|
|
||||||
2024-10-31,72344.75,72700.0,69685.76,70292.01,29352.10297
|
|
||||||
2024-11-01,70292.01,71632.95,68820.14,69496.01,38301.86755
|
|
||||||
2024-11-02,69496.0,69914.37,69000.14,69374.74,10521.67243
|
|
||||||
2024-11-03,69374.74,69391.0,67478.73,68775.99,24995.70243
|
|
||||||
2024-11-04,68775.99,69500.0,66835.0,67850.01,29800.39187
|
|
||||||
2024-11-05,67850.01,70577.91,67476.63,69372.01,33355.06888
|
|
||||||
2024-11-06,69372.01,76400.0,69298.0,75571.99,104126.994787
|
|
||||||
2024-11-07,75571.99,76849.99,74416.0,75857.89,44869.422345
|
|
||||||
2024-11-08,75857.89,77199.99,75555.0,76509.78,36521.099583
|
|
||||||
2024-11-09,76509.78,76900.0,75714.66,76677.46,16942.07915
|
|
||||||
2024-11-10,76677.46,81500.0,76492.0,80370.01,61830.100435
|
|
||||||
2024-11-11,80370.01,89530.54,80216.01,88647.99,82323.665776
|
|
||||||
2024-11-12,88648.0,89940.0,85072.0,87952.01,97299.887911
|
|
||||||
2024-11-13,87952.0,93265.64,86127.99,90375.2,86763.854127
|
|
||||||
2024-11-14,90375.21,91790.0,86668.21,87325.59,56729.51086
|
|
||||||
2024-11-15,87325.59,91850.0,87073.38,91032.07,47927.95068
|
|
||||||
2024-11-16,91032.08,91779.66,90056.17,90586.92,22717.87689
|
|
||||||
2024-11-17,90587.98,91449.99,88722.0,89855.99,23867.55609
|
|
||||||
2024-11-18,89855.98,92594.0,89376.9,90464.08,46545.03448
|
|
||||||
2024-11-19,90464.07,93905.51,90357.0,92310.79,43660.04682
|
|
||||||
2024-11-20,92310.8,94831.97,91500.0,94286.56,42203.198712
|
|
||||||
2024-11-21,94286.56,98988.0,94040.0,98317.12,69228.360477
|
|
||||||
2024-11-22,98317.12,99588.01,97122.11,98892.0,46189.309243
|
|
||||||
2024-11-23,98892.0,98908.85,97136.0,97672.4,24757.84367
|
|
||||||
2024-11-24,97672.4,98564.0,95734.77,97900.04,31200.97838
|
|
||||||
2024-11-25,97900.05,98871.8,92600.19,93010.01,50847.45096
|
|
||||||
2024-11-26,93010.01,94973.37,90791.1,91965.16,57858.73138
|
|
||||||
2024-11-27,91965.16,97208.21,91792.14,95863.11,41153.42734
|
|
||||||
2024-11-28,95863.11,96564.0,94640.0,95643.98,28814.54357
|
|
||||||
2024-11-29,95643.99,98619.99,95364.99,97460.0,27701.78231
|
|
||||||
2024-11-30,97460.0,97463.95,96092.01,96407.99,14503.83306
|
|
||||||
2024-12-01,96407.99,97836.0,95693.88,97185.18,16938.60452
|
|
||||||
2024-12-02,97185.17,98130.0,94395.0,95840.62,37958.66981
|
|
||||||
2024-12-03,95840.61,96305.52,93578.17,95849.69,35827.32283
|
|
||||||
2024-12-04,95849.69,99000.0,94587.83,98587.32,43850.53728
|
|
||||||
2024-12-05,98587.32,104088.0,90500.0,96945.63,109921.729662
|
|
||||||
2024-12-06,96945.63,101898.99,95981.72,99740.84,45049.5331
|
|
||||||
2024-12-07,99740.84,100439.18,98844.0,99831.99,14931.9459
|
|
||||||
2024-12-08,99831.99,101351.0,98657.7,101109.59,14612.99688
|
|
||||||
2024-12-09,101109.6,101215.93,94150.05,97276.47,53949.11595
|
|
||||||
2024-12-10,97276.48,98270.0,94256.54,96593.0,51708.68933
|
|
||||||
2024-12-11,96593.0,101888.0,95658.24,101125.0,37753.78291
|
|
||||||
2024-12-12,101125.0,102540.0,99311.64,100004.29,29232.08745
|
|
||||||
2024-12-13,100004.29,101895.26,99205.0,101424.25,21904.03923
|
|
||||||
2024-12-14,101424.24,102650.0,100609.41,101420.0,14191.70326
|
|
||||||
2024-12-15,101420.0,105250.0,101237.14,104463.99,22228.921775
|
|
||||||
2024-12-16,104463.99,107793.07,103333.0,106058.66,41302.40274
|
|
||||||
2024-12-17,106058.65,108353.0,105321.49,106133.74,29064.936466
|
|
||||||
2024-12-18,106133.74,106524.98,100000.0,100204.01,50307.99755
|
|
||||||
2024-12-19,100204.01,102800.11,95700.0,97461.86,55147.398
|
|
||||||
2024-12-20,97461.86,98233.0,92232.54,97805.44,62884.1357
|
|
||||||
2024-12-21,97805.44,99540.61,96398.39,97291.99,23483.54143
|
|
||||||
2024-12-22,97292.0,97448.08,94250.35,95186.27,19353.83036
|
|
||||||
2024-12-23,95186.28,96538.92,92520.0,94881.47,32810.76703
|
|
||||||
2024-12-24,94881.47,99487.99,93569.02,98663.58,23674.22488
|
|
||||||
2024-12-25,98663.58,99569.15,97632.02,99429.6,14474.1651
|
|
||||||
2024-12-26,99429.61,99963.7,95199.14,95791.6,21192.36727
|
|
||||||
2024-12-27,95791.6,97544.58,93500.01,94299.03,26501.26429
|
|
||||||
2024-12-28,94299.03,95733.99,94135.66,95300.0,8385.8929
|
|
||||||
2024-12-29,95300.0,95340.0,93009.52,93738.2,13576.00578
|
|
||||||
2024-12-30,93738.19,95024.5,91530.45,92792.05,27619.4225
|
|
||||||
2024-12-31,92792.05,96250.0,92033.73,93576.0,19612.03389
|
|
||||||
2025-01-01,93576.0,95151.15,92888.0,94591.79,10373.32613
|
|
||||||
2025-01-02,94591.78,97839.5,94392.0,96984.79,21970.48948
|
|
||||||
2025-01-03,96984.79,98976.91,96100.01,98174.18,15253.82936
|
|
||||||
2025-01-04,98174.17,98778.43,97514.79,98220.5,8990.05651
|
|
||||||
2025-01-05,98220.51,98836.85,97276.79,98363.61,8095.63723
|
|
||||||
2025-01-06,98363.61,102480.0,97920.0,102235.6,25263.43375
|
|
||||||
2025-01-07,102235.6,102724.38,96181.81,96954.61,32059.87537
|
|
||||||
2025-01-08,96954.6,97268.65,92500.9,95060.61,33704.67894
|
|
||||||
2025-01-09,95060.61,95382.32,91203.67,92552.49,34544.83685
|
|
||||||
2025-01-10,92552.49,95836.0,92206.02,94726.11,31482.86424
|
|
||||||
2025-01-11,94726.1,95050.94,93831.73,94599.99,7047.9043
|
|
||||||
2025-01-12,94599.99,95450.1,93711.19,94545.06,8606.86622
|
|
||||||
2025-01-13,94545.07,95940.0,89256.69,94536.1,42619.56423
|
|
||||||
2025-01-14,94536.11,97371.0,94346.22,96560.86,27846.61753
|
|
||||||
2025-01-15,96560.85,100681.94,96500.0,100497.35,30509.99179
|
|
||||||
2025-01-16,100497.35,100866.66,97335.13,99987.3,27832.85317
|
|
||||||
2025-01-17,99987.3,105865.22,99950.77,104077.48,39171.85292
|
|
||||||
2025-01-18,104077.47,104988.88,102277.55,104556.23,24307.82998
|
|
||||||
2025-01-19,104556.23,106422.43,99651.6,101331.57,43397.28298
|
|
||||||
2025-01-20,101331.57,109588.0,99550.0,102260.01,89529.231732
|
|
||||||
2025-01-21,102260.0,107240.81,100119.04,106143.82,45941.02002
|
|
||||||
2025-01-22,106143.82,106394.46,103339.12,103706.66,22248.69254
|
|
||||||
2025-01-23,103706.66,106850.0,101262.28,103910.34,53953.12031
|
|
||||||
2025-01-24,103910.35,107120.0,102750.0,104870.5,23609.24017
|
|
||||||
2025-01-25,104870.51,105286.52,104106.09,104746.85,9068.32377
|
|
||||||
2025-01-26,104746.86,105500.0,102520.44,102620.0,9812.51238
|
|
||||||
2025-01-27,102620.01,103260.0,97777.77,102082.83,50758.1341
|
|
||||||
2025-01-28,102082.83,103800.0,100272.68,101335.52,22022.05765
|
|
||||||
2025-01-29,101335.52,104782.68,101328.01,103733.24,23155.35802
|
|
||||||
2025-01-30,103733.25,106457.44,103278.54,104722.94,19374.07472
|
|
||||||
2025-01-31,104722.94,106012.0,101560.0,102429.56,21983.18193
|
|
||||||
2025-02-01,102429.56,102783.71,100279.51,100635.65,12290.95747
|
|
||||||
2025-02-02,100635.66,101456.6,96150.0,97700.59,34619.49939
|
|
||||||
2025-02-03,97700.59,102500.01,91231.0,101328.52,75164.7385
|
|
||||||
2025-02-04,101328.51,101732.31,96150.0,97763.13,40267.98697
|
|
||||||
2025-02-05,97763.14,99149.0,96155.0,96612.43,26233.30444
|
|
||||||
2025-02-06,96612.44,99120.0,95676.64,96554.35,23515.20405
|
|
||||||
2025-02-07,96554.35,100137.99,95620.34,96506.8,31794.22065
|
|
||||||
2025-02-08,96506.8,96880.0,95688.0,96444.74,10147.24294
|
|
||||||
2025-02-09,96444.75,97323.09,94713.0,96462.75,14120.91613
|
|
||||||
2025-02-10,96462.75,98345.0,95256.0,97430.82,20572.87537
|
|
||||||
2025-02-11,97430.82,98478.42,94876.88,95778.2,18647.76379
|
|
||||||
2025-02-12,95778.21,98119.99,94088.23,97869.99,29151.16625
|
|
||||||
2025-02-13,97870.0,98083.91,95217.36,96608.14,19921.77616
|
|
||||||
2025-02-14,96608.13,98826.0,96252.82,97500.48,18173.02646
|
|
||||||
2025-02-15,97500.47,97972.26,97223.58,97569.66,7349.37683
|
|
||||||
2025-02-16,97569.67,97704.47,96046.18,96118.12,8191.4249
|
|
||||||
2025-02-17,96118.12,97046.59,95205.0,95780.0,16492.0451
|
|
||||||
2025-02-18,95780.01,96753.91,93388.09,95671.74,23368.19471
|
|
||||||
2025-02-19,95671.74,96899.99,95029.99,96644.37,16438.50954
|
|
||||||
2025-02-20,96644.37,98711.36,96415.09,98305.0,17057.39177
|
|
||||||
2025-02-21,98305.01,99475.0,94871.95,96181.98,32249.2814
|
|
||||||
2025-02-22,96181.99,96980.0,95770.49,96551.01,11268.17708
|
|
||||||
2025-02-23,96551.01,96650.0,95227.94,96258.0,10884.84913
|
|
||||||
2025-02-24,96258.0,96500.0,91349.26,91552.88,31550.10299
|
|
||||||
2025-02-25,91552.88,92540.69,86050.99,88680.4,78333.11111
|
|
||||||
2025-02-26,88680.39,89414.15,82256.01,84250.09,56893.54409
|
|
||||||
2025-02-27,84250.09,87078.46,82716.49,84708.58,42505.45439
|
|
||||||
2025-02-28,84708.57,85120.0,78258.52,84349.94,83648.03969
|
|
||||||
2025-03-01,84349.95,86558.0,83824.78,86064.53,25785.05464
|
|
||||||
2025-03-02,86064.54,95000.0,85050.6,94270.0,54889.09045
|
|
||||||
2025-03-03,94269.99,94416.46,85117.11,86220.61,59171.10218
|
|
||||||
2025-03-04,86221.16,88967.52,81500.0,87281.98,55609.10706
|
|
||||||
2025-03-05,87281.98,91000.0,86334.53,90606.01,38264.01163
|
|
||||||
2025-03-06,90606.0,92810.64,87836.0,89931.89,34342.44902
|
|
||||||
2025-03-07,89931.88,91283.02,84667.03,86801.75,57980.35713
|
|
||||||
2025-03-08,86801.74,86897.25,85218.47,86222.45,12989.23054
|
|
||||||
2025-03-09,86222.46,86500.0,80000.0,80734.37,26115.39345
|
|
||||||
2025-03-10,80734.48,84123.46,77459.91,78595.86,47633.38405
|
|
||||||
2025-03-11,78595.86,83617.4,76606.0,82932.99,48770.06853
|
|
||||||
2025-03-12,82932.99,84539.85,80607.65,83680.12,31933.986
|
|
||||||
2025-03-13,83680.12,84336.33,79939.9,81115.78,27546.27412
|
|
||||||
2025-03-14,81115.78,85309.71,80818.84,83983.2,26858.52755
|
|
||||||
2025-03-15,83983.19,84676.28,83618.0,84338.44,11324.7332
|
|
||||||
2025-03-16,84338.44,85117.04,81981.12,82574.53,17596.12531
|
|
||||||
2025-03-17,82574.52,84756.83,82456.0,84010.03,17214.74358
|
|
||||||
2025-03-18,84010.02,84021.74,81134.66,81991.92,16761.13242
|
|
|
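Both deleted cache files share the same flat OHLCV layout (timestamp,open,high,low,close,volume). If a similar cache file needs to be inspected, a minimal pandas sketch such as the following would read it; the path is only an example and this is not code from the repository:

```python
import pandas as pd

# Example path; the cached candle files removed in this commit used this layout.
df = pd.read_csv(
    "cache/BTC_USDT_1d_candles.csv",
    parse_dates=["timestamp"],
    index_col="timestamp",
)

# Simple sanity checks on the OHLCV columns.
print(df[["open", "high", "low", "close", "volume"]].describe())
print("rows:", len(df), "range:", df.index.min(), "->", df.index.max())
```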
501 cache/BTC_USDT_1h_candles.csv vendored
@ -1,501 +0,0 @@
[Deleted cache file: 501 lines (the same timestamp,open,high,low,close,volume header plus hourly BTC/USDT OHLCV rows, beginning 2025-02-26 02:00:00); full contents omitted here.]
|
|
||||||
2025-03-13 04:00:00,83577.14,83709.03,83201.91,83286.63,739.22902
|
|
||||||
2025-03-13 05:00:00,83286.63,83540.83,83077.0,83157.16,747.63636
|
|
||||||
2025-03-13 06:00:00,83157.16,83329.42,82851.0,83292.6,814.14759
|
|
||||||
2025-03-13 07:00:00,83292.6,83559.06,83127.98,83151.99,783.38238
|
|
||||||
2025-03-13 08:00:00,83152.0,83262.58,82400.04,82973.35,822.40169
|
|
||||||
2025-03-13 09:00:00,82973.34,83522.61,82969.79,83426.87,964.97054
|
|
||||||
2025-03-13 10:00:00,83426.87,83488.43,83001.0,83191.67,754.49967
|
|
||||||
2025-03-13 11:00:00,83191.67,83229.42,82708.29,82975.86,657.69577
|
|
||||||
2025-03-13 12:00:00,82975.86,83900.0,82286.0,82517.99,1673.24224
|
|
||||||
2025-03-13 13:00:00,82518.0,83111.64,81600.0,81777.15,2056.53095
|
|
||||||
2025-03-13 14:00:00,81777.15,82524.46,81546.52,82009.5,1725.43591
|
|
||||||
2025-03-13 15:00:00,82009.51,82193.66,80954.29,81044.72,2175.89338
|
|
||||||
2025-03-13 16:00:00,81044.72,81264.73,80666.0,80876.36,1686.93991
|
|
||||||
2025-03-13 17:00:00,80876.36,80893.02,79939.9,80064.0,2988.91877
|
|
||||||
2025-03-13 18:00:00,80063.99,80957.84,79996.65,80820.05,1638.17538
|
|
||||||
2025-03-13 19:00:00,80820.05,81031.81,80137.47,80211.4,1081.83567
|
|
||||||
2025-03-13 20:00:00,80210.4,80735.83,80189.89,80379.05,770.63142
|
|
||||||
2025-03-13 21:00:00,80379.05,80882.65,80280.66,80819.99,713.67943
|
|
||||||
2025-03-13 22:00:00,80819.99,81400.0,80730.48,81395.54,550.15428
|
|
||||||
2025-03-13 23:00:00,81395.54,81484.23,81046.57,81115.78,433.38654
|
|
||||||
2025-03-14 00:00:00,81115.78,81694.41,80818.84,81503.31,721.32303
|
|
||||||
2025-03-14 01:00:00,81503.31,81876.17,81206.04,81603.08,506.51801
|
|
||||||
2025-03-14 02:00:00,81603.07,82270.51,81588.51,82226.98,714.90216
|
|
||||||
2025-03-14 03:00:00,82226.98,82247.7,81708.96,81888.77,803.86317
|
|
||||||
2025-03-14 04:00:00,81888.77,82040.0,81841.99,81948.16,371.30991
|
|
||||||
2025-03-14 05:00:00,81948.17,82218.03,81820.22,82141.96,622.41868
|
|
||||||
2025-03-14 06:00:00,82141.96,82191.22,81780.08,82038.09,405.28499
|
|
||||||
2025-03-14 07:00:00,82038.09,82454.5,81895.95,82170.85,622.58387
|
|
||||||
2025-03-14 08:00:00,82170.84,82647.21,82170.84,82593.44,641.79384
|
|
||||||
2025-03-14 09:00:00,82593.45,82940.34,82569.31,82776.5,710.88441
|
|
||||||
2025-03-14 10:00:00,82776.51,83300.01,82672.29,83042.22,1268.05152
|
|
||||||
2025-03-14 11:00:00,83042.21,83348.46,82975.33,83253.15,719.70825
|
|
||||||
2025-03-14 12:00:00,83253.15,83526.96,83052.41,83420.87,921.32806
|
|
||||||
2025-03-14 13:00:00,83420.87,83888.0,83040.83,83602.16,1846.65606
|
|
||||||
2025-03-14 14:00:00,83602.15,83602.15,82640.59,83524.8,3070.46253
|
|
||||||
2025-03-14 15:00:00,83524.8,85309.71,83524.8,84724.58,5578.42625
|
|
||||||
2025-03-14 16:00:00,84724.58,84950.0,83949.19,84787.37,2261.19233
|
|
||||||
2025-03-14 17:00:00,84787.37,84838.0,84194.01,84365.18,861.11789
|
|
||||||
2025-03-14 18:00:00,84365.18,84726.42,84203.66,84570.54,803.03307
|
|
||||||
2025-03-14 19:00:00,84570.54,84929.45,84458.44,84554.0,737.1922
|
|
||||||
2025-03-14 20:00:00,84554.0,84554.01,83717.69,84127.2,1000.5735
|
|
||||||
2025-03-14 21:00:00,84127.21,84299.7,83953.57,84205.79,383.76632
|
|
||||||
2025-03-14 22:00:00,84205.79,84512.05,84105.73,84438.06,849.70705
|
|
||||||
2025-03-14 23:00:00,84438.07,84445.2,83950.06,83983.2,436.43045
|
|
||||||
2025-03-15 00:00:00,83983.19,84351.88,83964.57,84297.14,404.85017
|
|
||||||
2025-03-15 01:00:00,84297.13,84583.04,84107.27,84353.85,433.0424
|
|
||||||
2025-03-15 02:00:00,84353.85,84632.08,84160.37,84557.95,452.46432
|
|
||||||
2025-03-15 03:00:00,84557.96,84676.28,84418.38,84491.99,510.72249
|
|
||||||
2025-03-15 04:00:00,84492.0,84559.0,84300.04,84456.57,295.90084
|
|
||||||
2025-03-15 05:00:00,84456.58,84478.03,84266.25,84266.26,264.13452
|
|
||||||
2025-03-15 06:00:00,84266.26,84308.16,84160.37,84183.28,311.33661
|
|
||||||
2025-03-15 07:00:00,84183.28,84192.21,83791.09,83827.31,916.14417
|
|
||||||
2025-03-15 08:00:00,83827.32,84032.9,83618.0,83891.36,779.80417
|
|
||||||
2025-03-15 09:00:00,83891.37,83975.22,83710.32,83966.95,949.50063
|
|
||||||
2025-03-15 10:00:00,83966.95,83994.11,83667.23,83912.19,385.90322
|
|
||||||
2025-03-15 11:00:00,83912.19,84184.58,83842.95,84103.74,436.48745
|
|
||||||
2025-03-15 12:00:00,84103.74,84272.19,84010.58,84136.53,638.42673
|
|
||||||
2025-03-15 13:00:00,84136.53,84499.74,84132.08,84460.0,810.2508
|
|
||||||
2025-03-15 14:00:00,84459.99,84459.99,84106.83,84131.75,859.8038
|
|
||||||
2025-03-15 15:00:00,84131.75,84408.6,84131.75,84346.02,487.41953
|
|
||||||
2025-03-15 16:00:00,84346.03,84498.72,84196.0,84362.91,680.60018
|
|
||||||
2025-03-15 17:00:00,84362.9,84495.43,84277.66,84422.25,392.01683
|
|
||||||
2025-03-15 18:00:00,84422.25,84476.46,84125.01,84223.29,284.29156
|
|
||||||
2025-03-15 19:00:00,84223.28,84320.49,84076.92,84299.98,324.78211
|
|
||||||
2025-03-15 20:00:00,84299.99,84450.0,84299.98,84389.31,174.96258
|
|
||||||
2025-03-15 21:00:00,84389.31,84467.7,84237.58,84388.73,199.25715
|
|
||||||
2025-03-15 22:00:00,84388.73,84450.1,84322.82,84382.65,161.41378
|
|
||||||
2025-03-15 23:00:00,84382.65,84411.69,84268.4,84338.44,171.21716
|
|
||||||
2025-03-16 00:00:00,84338.44,84391.06,84213.03,84253.52,183.74346
|
|
||||||
2025-03-16 01:00:00,84253.53,84277.03,83840.0,83869.31,457.47235
|
|
||||||
2025-03-16 02:00:00,83869.32,84130.39,83733.86,83966.05,320.67384
|
|
||||||
2025-03-16 03:00:00,83966.05,84393.72,83906.26,84332.34,252.10048
|
|
||||||
2025-03-16 04:00:00,84332.35,84418.0,84245.98,84408.97,266.06424
|
|
||||||
2025-03-16 05:00:00,84408.97,84411.85,84305.95,84395.31,207.70079
|
|
||||||
2025-03-16 06:00:00,84395.31,84400.0,84228.0,84228.0,145.78217
|
|
||||||
2025-03-16 07:00:00,84228.01,84373.64,83972.57,84273.96,345.16527
|
|
||||||
2025-03-16 08:00:00,84273.95,84335.43,84162.83,84268.36,218.05774
|
|
||||||
2025-03-16 09:00:00,84268.36,84271.03,83800.0,83897.02,456.91285
|
|
||||||
2025-03-16 10:00:00,83897.02,84066.92,83649.04,83666.98,458.6363
|
|
||||||
2025-03-16 11:00:00,83666.98,83691.61,82381.46,82410.02,1849.0388
|
|
||||||
2025-03-16 12:00:00,82410.01,82931.41,82400.0,82755.89,1045.99634
|
|
||||||
2025-03-16 13:00:00,82755.89,83151.81,82498.38,82596.1,1101.71905
|
|
||||||
2025-03-16 14:00:00,82596.09,83385.97,82575.36,83373.16,862.87399
|
|
||||||
2025-03-16 15:00:00,83373.15,83508.95,82981.14,83320.83,771.86427
|
|
||||||
2025-03-16 16:00:00,83320.83,85117.04,83293.42,83634.53,3344.71639
|
|
||||||
2025-03-16 17:00:00,83633.49,84118.56,83483.45,83913.22,905.46005
|
|
||||||
2025-03-16 18:00:00,83913.22,84094.52,83633.14,83701.75,531.09149
|
|
||||||
2025-03-16 19:00:00,83701.76,83727.28,82562.36,83098.47,1100.6719
|
|
||||||
2025-03-16 20:00:00,83098.47,83453.01,82910.77,83216.98,547.08888
|
|
||||||
2025-03-16 21:00:00,83216.99,83224.31,82688.82,82766.52,482.34007
|
|
||||||
2025-03-16 22:00:00,82766.51,82846.0,82090.7,82093.71,911.28605
|
|
||||||
2025-03-16 23:00:00,82093.72,82632.0,81981.12,82574.53,829.66854
|
|
||||||
2025-03-17 00:00:00,82574.52,83156.5,82523.0,83054.91,514.41819
|
|
||||||
2025-03-17 01:00:00,83054.9,83400.0,82825.55,83233.8,552.38898
|
|
||||||
2025-03-17 02:00:00,83233.8,83375.35,82981.68,83212.51,364.65635
|
|
||||||
2025-03-17 03:00:00,83212.51,83725.39,83212.5,83675.65,708.95371
|
|
||||||
2025-03-17 04:00:00,83675.64,83869.77,83522.0,83615.53,468.72749
|
|
||||||
2025-03-17 05:00:00,83615.53,83615.53,83120.0,83120.0,603.93675
|
|
||||||
2025-03-17 06:00:00,83120.01,83295.07,82682.62,83220.91,642.44701
|
|
||||||
2025-03-17 07:00:00,83220.91,83636.36,83212.83,83567.87,721.46174
|
|
||||||
2025-03-17 08:00:00,83567.88,83719.39,83221.18,83547.17,511.92445
|
|
||||||
2025-03-17 09:00:00,83547.16,83783.02,83431.09,83664.31,550.97551
|
|
||||||
2025-03-17 10:00:00,83664.31,83687.17,83260.04,83344.34,424.06115
|
|
||||||
2025-03-17 11:00:00,83344.34,83699.0,83220.11,83514.73,417.30684
|
|
||||||
2025-03-17 12:00:00,83514.73,84000.0,83002.01,83052.0,1602.93914
|
|
||||||
2025-03-17 13:00:00,83052.01,83735.84,82808.52,83023.38,1353.72595
|
|
||||||
2025-03-17 14:00:00,83023.37,83358.5,82456.0,82913.03,1546.82937
|
|
||||||
2025-03-17 15:00:00,82913.03,83701.71,82706.58,83531.06,969.57355
|
|
||||||
2025-03-17 16:00:00,83531.07,83878.9,83103.6,83448.86,972.11069
|
|
||||||
2025-03-17 17:00:00,83448.86,84336.89,83358.49,84197.46,1045.92714
|
|
||||||
2025-03-17 18:00:00,84197.46,84585.0,83983.74,84496.58,1090.21429
|
|
||||||
2025-03-17 19:00:00,84496.57,84756.83,84311.07,84482.55,725.81983
|
|
||||||
2025-03-17 20:00:00,84482.55,84506.8,83846.15,83954.36,506.33404
|
|
||||||
2025-03-17 21:00:00,83954.36,84251.86,83883.87,84077.8,377.45641
|
|
||||||
2025-03-17 22:00:00,84077.8,84249.99,84000.02,84061.19,218.83761
|
|
||||||
2025-03-17 23:00:00,84061.19,84140.98,83746.12,84010.03,323.71739
|
|
||||||
2025-03-18 00:00:00,84010.02,84021.74,83702.53,83790.24,280.13543
|
|
||||||
2025-03-18 01:00:00,83790.24,83915.98,83015.39,83161.97,903.36529
|
|
||||||
2025-03-18 02:00:00,83161.98,83532.61,83081.67,83367.64,445.46915
|
|
||||||
2025-03-18 03:00:00,83367.65,83367.65,82960.0,83147.6,598.19056
|
|
||||||
2025-03-18 04:00:00,83147.6,83205.77,82838.8,82989.11,621.0049
|
|
||||||
2025-03-18 05:00:00,82989.11,83128.1,82895.57,83010.87,743.9242
|
|
||||||
2025-03-18 06:00:00,83010.86,83019.4,82244.68,82450.01,1234.9382
|
|
||||||
2025-03-18 07:00:00,82450.02,83280.0,82358.69,83163.81,1191.95624
|
|
||||||
2025-03-18 08:00:00,83163.81,83452.3,83048.71,83281.29,526.74962
|
|
||||||
2025-03-18 09:00:00,83281.29,83349.06,82780.0,82840.51,458.74305
|
|
||||||
2025-03-18 10:00:00,82840.5,82942.1,82596.85,82819.65,479.85282
|
|
||||||
2025-03-18 11:00:00,82819.65,82861.76,82500.0,82581.1,394.26285
|
|
||||||
2025-03-18 12:00:00,82581.09,82821.6,82330.38,82404.66,658.78879
|
|
||||||
2025-03-18 13:00:00,82404.65,82480.47,81134.66,81366.64,2072.84164
|
|
||||||
2025-03-18 14:00:00,81365.78,81978.86,81150.28,81854.71,1493.47462
|
|
||||||
2025-03-18 15:00:00,81854.72,82212.93,81537.02,81610.21,1017.77265
|
|
||||||
2025-03-18 16:00:00,81610.22,82160.49,81547.22,81851.51,678.82544
|
|
||||||
2025-03-18 17:00:00,81851.52,81975.8,81332.58,81466.7,894.91848
|
|
||||||
2025-03-18 18:00:00,81466.69,81950.8,81245.65,81889.38,568.60084
|
|
||||||
2025-03-18 19:00:00,81889.38,82476.26,81762.45,82317.21,898.10914
|
|
||||||
2025-03-18 20:00:00,82317.2,82353.8,81870.23,82050.0,438.19738
|
|
||||||
2025-03-18 21:00:00,82050.0,82208.9,81934.76,81991.92,161.01113
|
|
|
501
cache/BTC_USDT_1m_candles.csv
vendored
@ -1,501 +0,0 @@
timestamp,open,high,low,close,volume
2025-03-18 13:17:00,82305.87,82332.76,82268.16,82289.4,26.27611
[498 intermediate one-minute OHLCV rows (2025-03-18 13:18 through 21:35) of this deleted cache file omitted]
2025-03-18 21:36:00,81945.03,81991.92,81934.76,81991.91,2.17095
501
cache/ETH_USDT_1d_candles.csv
vendored
@ -1,501 +0,0 @@
timestamp,open,high,low,close,volume
2023-11-05,1855.54,1912.67,1846.11,1891.71,383106.7628
[130 intermediate daily OHLCV rows (2023-11-06 through 2024-03-14) of this deleted cache file omitted]
2024-03-15,3881.69,3934.02,3570.0,3742.19,947537.4071
2024-03-16,3742.19,3781.12,3468.8,3523.09,548288.1569
|
|
||||||
2024-03-17,3523.09,3678.68,3412.0,3644.71,517790.9936
|
|
||||||
2024-03-18,3644.7,3645.02,3454.09,3520.46,570901.2851
|
|
||||||
2024-03-19,3520.47,3548.09,3150.88,3158.64,1049629.6936
|
|
||||||
2024-03-20,3158.65,3535.24,3056.56,3516.53,1207322.8201
|
|
||||||
2024-03-21,3516.53,3587.32,3412.0,3492.85,602755.2072
|
|
||||||
2024-03-22,3492.84,3542.52,3250.0,3336.35,558848.888
|
|
||||||
2024-03-23,3336.35,3435.48,3270.08,3329.53,323675.9448
|
|
||||||
2024-03-24,3329.53,3471.22,3298.76,3454.98,260824.1408
|
|
||||||
2024-03-25,3454.99,3666.0,3420.12,3590.42,548577.6942
|
|
||||||
2024-03-26,3590.43,3678.86,3542.62,3587.33,499013.8384
|
|
||||||
2024-03-27,3587.32,3665.84,3460.02,3501.19,443195.3297
|
|
||||||
2024-03-28,3501.2,3611.78,3465.0,3560.49,404654.0311
|
|
||||||
2024-03-29,3560.49,3584.37,3445.91,3509.74,323086.179
|
|
||||||
2024-03-30,3509.74,3565.81,3485.0,3505.64,216025.2634
|
|
||||||
2024-03-31,3505.65,3655.32,3505.09,3645.29,243464.9877
|
|
||||||
2024-04-01,3645.29,3645.95,3413.71,3503.8,408854.1225
|
|
||||||
2024-04-02,3503.8,3505.6,3212.0,3278.96,623264.6476
|
|
||||||
2024-04-03,3278.96,3367.4,3202.79,3310.83,410484.1933
|
|
||||||
2024-04-04,3310.83,3443.93,3250.92,3327.4,356338.5716
|
|
||||||
2024-04-05,3327.39,3350.0,3210.0,3317.85,344556.7983
|
|
||||||
2024-04-06,3317.85,3398.42,3306.68,3351.59,183312.2125
|
|
||||||
2024-04-07,3351.59,3459.94,3344.08,3454.2,210459.0752
|
|
||||||
2024-04-08,3454.2,3730.71,3406.36,3694.61,496841.3532
|
|
||||||
2024-04-09,3694.61,3727.34,3450.46,3506.39,457592.252
|
|
||||||
2024-04-10,3506.4,3562.95,3411.82,3545.64,389404.9055
|
|
||||||
2024-04-11,3545.64,3618.3,3474.52,3502.52,338139.5534
|
|
||||||
2024-04-12,3502.52,3552.4,3100.0,3237.43,697327.1174
|
|
||||||
2024-04-13,3237.42,3301.9,2852.0,3007.01,994971.2354
|
|
||||||
2024-04-14,3007.01,3174.23,2906.73,3155.11,737580.4666
|
|
||||||
2024-04-15,3155.11,3277.85,3023.19,3101.99,605430.8863
|
|
||||||
2024-04-16,3102.0,3128.01,2986.0,3084.22,526113.855
|
|
||||||
2024-04-17,3084.21,3123.75,2914.47,2985.41,491897.5661
|
|
||||||
2024-04-18,2985.41,3094.4,2950.98,3064.4,389060.7133
|
|
||||||
2024-04-19,3064.4,3128.89,2865.18,3056.46,607850.4019
|
|
||||||
2024-04-20,3056.45,3171.88,3018.75,3155.79,242577.6371
|
|
||||||
2024-04-21,3155.79,3197.18,3116.49,3147.67,219970.5072
|
|
||||||
2024-04-22,3147.66,3235.0,3129.15,3200.2,290879.9569
|
|
||||||
2024-04-23,3200.19,3263.61,3152.0,3219.46,253914.2263
|
|
||||||
2024-04-24,3219.46,3293.56,3104.9,3140.8,393314.3414
|
|
||||||
2024-04-25,3140.79,3191.64,3072.2,3155.8,352288.5502
|
|
||||||
2024-04-26,3155.81,3167.7,3102.0,3131.3,252522.655
|
|
||||||
2024-04-27,3131.3,3285.0,3066.74,3255.56,323811.1919
|
|
||||||
2024-04-28,3255.55,3357.4,3250.45,3263.45,304766.009
|
|
||||||
2024-04-29,3263.44,3286.95,3115.13,3216.73,421831.2912
|
|
||||||
2024-04-30,3216.74,3250.95,2921.0,3014.05,561717.4866
|
|
||||||
2024-05-01,3014.04,3023.24,2817.0,2972.46,624963.7773
|
|
||||||
2024-05-02,2972.46,3016.72,2893.26,2986.19,365939.7191
|
|
||||||
2024-05-03,2986.19,3126.99,2958.32,3102.61,355825.8391
|
|
||||||
2024-05-04,3102.61,3168.0,3092.85,3117.23,196263.9488
|
|
||||||
2024-05-05,3117.24,3171.93,3072.99,3136.41,218760.2662
|
|
||||||
2024-05-06,3136.4,3221.4,3046.35,3062.6,355135.3027
|
|
||||||
2024-05-07,3062.59,3129.85,2998.0,3005.69,298796.6829
|
|
||||||
2024-05-08,3005.69,3038.15,2936.48,2974.21,266934.8125
|
|
||||||
2024-05-09,2974.2,3059.0,2950.77,3036.23,238561.7462
|
|
||||||
2024-05-10,3036.24,3053.89,2878.03,2909.99,327855.8758
|
|
||||||
2024-05-11,2909.98,2945.67,2886.46,2912.45,138163.1087
|
|
||||||
2024-05-12,2912.45,2955.2,2901.17,2929.29,107670.6323
|
|
||||||
2024-05-13,2929.3,2996.4,2864.76,2950.99,368664.7648
|
|
||||||
2024-05-14,2950.99,2960.6,2862.0,2881.93,269675.3037
|
|
||||||
2024-05-15,2881.93,3041.36,2863.75,3032.55,350254.3188
|
|
||||||
2024-05-16,3032.55,3041.24,2922.8,2944.7,293874.1513
|
|
||||||
2024-05-17,2944.7,3120.0,2933.06,3092.01,357724.7731
|
|
||||||
2024-05-18,3092.0,3146.98,3083.61,3122.94,199976.5456
|
|
||||||
2024-05-19,3122.94,3136.64,3053.38,3071.19,166145.0102
|
|
||||||
2024-05-20,3071.2,3694.0,3047.67,3661.78,847107.8828
|
|
||||||
2024-05-21,3661.79,3841.54,3624.28,3789.6,952437.2694
|
|
||||||
2024-05-22,3789.59,3813.69,3653.15,3737.73,574938.3099
|
|
||||||
2024-05-23,3737.72,3949.29,3498.0,3783.61,1127866.076
|
|
||||||
2024-05-24,3783.6,3829.61,3626.1,3728.28,479418.4318
|
|
||||||
2024-05-25,3728.28,3779.43,3709.03,3749.26,158711.8992
|
|
||||||
2024-05-26,3749.25,3884.2,3731.17,3826.47,292208.2937
|
|
||||||
2024-05-27,3826.47,3977.0,3823.37,3894.22,363717.6473
|
|
||||||
2024-05-28,3894.21,3931.09,3773.9,3844.69,416249.8344
|
|
||||||
2024-05-29,3844.69,3888.55,3742.59,3767.43,334759.6348
|
|
||||||
2024-05-30,3767.44,3825.35,3702.58,3747.91,277034.2894
|
|
||||||
2024-05-31,3747.9,3849.94,3723.75,3762.29,273406.766
|
|
||||||
2024-06-01,3762.29,3833.3,3752.67,3815.82,132687.2006
|
|
||||||
2024-06-02,3815.82,3838.59,3752.62,3780.91,210295.4407
|
|
||||||
2024-06-03,3780.92,3849.99,3758.43,3767.06,248848.702
|
|
||||||
2024-06-04,3767.07,3831.65,3730.0,3810.23,232063.7468
|
|
||||||
2024-06-05,3810.23,3887.47,3777.33,3865.99,273738.6035
|
|
||||||
2024-06-06,3866.0,3878.6,3760.0,3813.46,237504.1905
|
|
||||||
2024-06-07,3813.47,3841.39,3600.0,3678.32,362223.3594
|
|
||||||
2024-06-08,3678.31,3709.5,3660.08,3681.57,140550.8394
|
|
||||||
2024-06-09,3681.58,3721.52,3666.36,3706.4,103451.102
|
|
||||||
2024-06-10,3706.4,3713.67,3642.74,3667.85,156363.1846
|
|
||||||
2024-06-11,3667.85,3673.0,3432.0,3497.33,439568.5978
|
|
||||||
2024-06-12,3497.33,3659.01,3462.07,3560.12,387043.774
|
|
||||||
2024-06-13,3560.13,3561.65,3428.0,3469.4,300230.8299
|
|
||||||
2024-06-14,3469.4,3532.61,3362.26,3481.8,322316.4214
|
|
||||||
2024-06-15,3481.8,3594.39,3473.1,3568.74,246649.5894
|
|
||||||
2024-06-16,3568.75,3653.79,3541.05,3624.41,149839.982
|
|
||||||
2024-06-17,3624.41,3638.37,3463.39,3511.46,337805.6402
|
|
||||||
2024-06-18,3511.47,3517.2,3355.0,3483.42,449913.3014
|
|
||||||
2024-06-19,3483.42,3590.01,3465.65,3560.51,321500.0027
|
|
||||||
2024-06-20,3560.51,3625.96,3486.0,3513.08,329117.8453
|
|
||||||
2024-06-21,3513.08,3547.55,3446.82,3518.5,342155.102
|
|
||||||
2024-06-22,3518.5,3521.19,3475.09,3495.75,115774.6157
|
|
||||||
2024-06-23,3495.76,3521.45,3406.38,3420.91,133411.4304
|
|
||||||
2024-06-24,3420.91,3435.76,3240.0,3352.73,543370.4229
|
|
||||||
2024-06-25,3352.74,3430.88,3336.76,3394.91,240036.2236
|
|
||||||
2024-06-26,3394.91,3426.75,3325.01,3371.44,206949.3418
|
|
||||||
2024-06-27,3371.77,3477.0,3361.74,3450.44,201041.6609
|
|
||||||
2024-06-28,3450.44,3487.7,3365.22,3380.15,221634.3435
|
|
||||||
2024-06-29,3380.15,3408.32,3371.86,3378.8,88329.7651
|
|
||||||
2024-06-30,3378.8,3460.0,3352.66,3438.16,141221.6185
|
|
||||||
2024-07-01,3438.16,3524.94,3423.78,3442.2,224273.2122
|
|
||||||
2024-07-02,3442.2,3464.12,3402.0,3421.35,137932.7014
|
|
||||||
2024-07-03,3421.35,3432.1,3251.0,3295.48,310408.451
|
|
||||||
2024-07-04,3295.49,3313.45,3050.34,3059.7,450402.5692
|
|
||||||
2024-07-05,3059.7,3110.0,2810.0,2981.78,886994.3753
|
|
||||||
2024-07-06,2981.79,3081.78,2955.06,3066.83,214487.9713
|
|
||||||
2024-07-07,3066.83,3073.08,2922.24,2931.0,179892.7612
|
|
||||||
2024-07-08,2930.99,3097.06,2822.8,3019.01,500445.0105
|
|
||||||
2024-07-09,3019.01,3115.2,3004.0,3066.65,280358.1424
|
|
||||||
2024-07-10,3066.65,3151.51,3024.0,3101.05,270827.0844
|
|
||||||
2024-07-11,3101.06,3217.24,3054.76,3099.57,289229.32
|
|
||||||
2024-07-12,3099.57,3157.89,3045.58,3133.88,185745.1825
|
|
||||||
2024-07-13,3133.89,3201.8,3113.37,3175.93,119302.4769
|
|
||||||
2024-07-14,3175.93,3268.72,3163.67,3245.08,165430.7716
|
|
||||||
2024-07-15,3245.2,3493.63,3233.22,3483.39,314296.8452
|
|
||||||
2024-07-16,3483.2,3498.59,3346.55,3444.13,376128.7869
|
|
||||||
2024-07-17,3444.14,3517.0,3376.0,3387.05,297812.5162
|
|
||||||
2024-07-18,3387.05,3489.98,3367.2,3426.5,241950.3457
|
|
||||||
2024-07-19,3426.49,3540.27,3377.0,3503.53,299511.6422
|
|
||||||
2024-07-20,3503.53,3539.65,3480.0,3517.5,147510.8165
|
|
||||||
2024-07-21,3517.5,3547.0,3411.4,3535.92,218890.9656
|
|
||||||
2024-07-22,3535.93,3562.82,3422.34,3439.6,207859.5043
|
|
||||||
2024-07-23,3439.61,3541.0,3389.0,3482.51,500568.7811
|
|
||||||
2024-07-24,3482.51,3487.82,3300.0,3335.81,275479.0506
|
|
||||||
2024-07-25,3335.82,3342.5,3087.53,3175.48,494282.6398
|
|
||||||
2024-07-26,3175.47,3286.36,3171.0,3274.61,260237.4641
|
|
||||||
2024-07-27,3274.6,3327.59,3191.01,3249.01,231991.9037
|
|
||||||
2024-07-28,3249.0,3284.3,3198.11,3270.16,103529.8329
|
|
||||||
2024-07-29,3270.16,3396.77,3258.0,3317.66,318471.5055
|
|
||||||
2024-07-30,3317.66,3366.4,3233.18,3279.21,197277.1641
|
|
||||||
2024-07-31,3279.2,3350.0,3213.75,3232.74,233711.3352
|
|
||||||
2024-08-01,3232.74,3242.57,3080.02,3203.4,323285.6105
|
|
||||||
2024-08-02,3203.4,3218.6,2967.0,2989.61,385704.0395
|
|
||||||
2024-08-03,2989.61,3018.02,2859.0,2903.64,344595.3157
|
|
||||||
2024-08-04,2903.65,2935.46,2630.0,2688.92,512064.2612
|
|
||||||
2024-08-05,2688.91,2697.44,2111.0,2419.59,2670604.1003
|
|
||||||
2024-08-06,2419.76,2556.23,2414.19,2461.33,642283.8038
|
|
||||||
2024-08-07,2461.33,2551.32,2309.04,2342.8,630115.4228
|
|
||||||
2024-08-08,2342.79,2724.1,2320.4,2682.5,586502.8754
|
|
||||||
2024-08-09,2682.5,2707.98,2552.61,2598.78,406739.9611
|
|
||||||
2024-08-10,2598.79,2644.7,2576.49,2609.92,171012.3619
|
|
||||||
2024-08-11,2609.92,2720.0,2540.0,2555.38,297098.9145
|
|
||||||
2024-08-12,2555.38,2750.0,2510.05,2722.3,483414.2506
|
|
||||||
2024-08-13,2722.3,2738.4,2611.37,2702.44,325668.6964
|
|
||||||
2024-08-14,2702.44,2780.0,2632.2,2661.45,338808.7141
|
|
||||||
2024-08-15,2661.45,2675.6,2515.71,2569.89,356546.8059
|
|
||||||
2024-08-16,2569.9,2630.97,2550.04,2592.73,270684.1373
|
|
||||||
2024-08-17,2592.72,2629.69,2587.5,2614.51,89348.676
|
|
||||||
2024-08-18,2614.51,2689.16,2594.53,2612.15,174074.0585
|
|
||||||
2024-08-19,2612.15,2648.08,2563.58,2636.36,224867.1336
|
|
||||||
2024-08-20,2636.36,2695.0,2555.0,2572.82,281315.8751
|
|
||||||
2024-08-21,2572.81,2663.74,2536.22,2630.71,245286.6218
|
|
||||||
2024-08-22,2630.71,2644.69,2584.2,2622.88,199063.1196
|
|
||||||
2024-08-23,2622.89,2799.13,2621.4,2762.48,401786.4447
|
|
||||||
2024-08-24,2762.49,2820.0,2731.26,2768.0,263542.9441
|
|
||||||
2024-08-25,2768.01,2792.28,2733.21,2746.13,154527.2352
|
|
||||||
2024-08-26,2746.12,2762.0,2666.66,2680.49,211708.1247
|
|
||||||
2024-08-27,2680.49,2699.98,2392.96,2457.33,412660.7197
|
|
||||||
2024-08-28,2457.33,2554.6,2418.8,2528.33,451266.634
|
|
||||||
2024-08-29,2528.33,2595.4,2505.88,2527.61,237002.9632
|
|
||||||
2024-08-30,2527.6,2552.17,2431.14,2526.0,338379.1941
|
|
||||||
2024-08-31,2525.99,2533.95,2491.92,2513.01,97946.1147
|
|
||||||
2024-09-01,2513.0,2516.28,2400.0,2425.72,223127.776
|
|
||||||
2024-09-02,2425.71,2564.83,2423.52,2538.01,262632.8385
|
|
||||||
2024-09-03,2538.0,2553.6,2411.12,2425.29,224793.1299
|
|
||||||
2024-09-04,2425.28,2490.0,2306.65,2450.71,384179.3854
|
|
||||||
2024-09-05,2450.71,2466.0,2348.04,2368.81,278549.0969
|
|
||||||
2024-09-06,2368.81,2408.83,2150.55,2225.23,643027.111
|
|
||||||
2024-09-07,2225.24,2311.27,2220.98,2273.58,219453.2076
|
|
||||||
2024-09-08,2273.58,2333.58,2240.94,2297.3,192775.6493
|
|
||||||
2024-09-09,2297.3,2381.41,2272.8,2359.5,321698.99
|
|
||||||
2024-09-10,2359.51,2400.0,2320.41,2388.52,224382.1729
|
|
||||||
2024-09-11,2388.52,2389.32,2277.68,2340.55,282898.3125
|
|
||||||
2024-09-12,2340.54,2391.93,2315.39,2361.76,202611.1308
|
|
||||||
2024-09-13,2361.75,2464.82,2337.35,2439.19,253888.616
|
|
||||||
2024-09-14,2439.19,2440.6,2376.72,2417.79,122931.2302
|
|
||||||
2024-09-15,2417.8,2430.32,2283.75,2316.1,174364.1946
|
|
||||||
2024-09-16,2316.09,2335.7,2252.39,2295.68,330555.232
|
|
||||||
2024-09-17,2295.67,2393.63,2263.29,2341.8,274206.2588
|
|
||||||
2024-09-18,2341.79,2376.14,2277.34,2374.75,292917.6008
|
|
||||||
2024-09-19,2374.74,2494.95,2372.6,2465.21,326746.032
|
|
||||||
2024-09-20,2465.21,2571.93,2437.31,2561.4,340754.5912
|
|
||||||
2024-09-21,2561.4,2623.34,2528.97,2612.4,154757.8773
|
|
||||||
2024-09-22,2612.4,2632.57,2524.58,2581.0,214266.3257
|
|
||||||
2024-09-23,2580.99,2702.82,2539.49,2646.97,333282.0149
|
|
||||||
2024-09-24,2646.98,2670.96,2591.56,2653.2,288600.0402
|
|
||||||
2024-09-25,2653.2,2673.5,2554.05,2579.95,220343.4293
|
|
||||||
2024-09-26,2580.09,2666.22,2559.2,2632.26,278686.3573
|
|
||||||
2024-09-27,2632.25,2728.6,2615.21,2694.43,281826.9792
|
|
||||||
2024-09-28,2694.43,2704.35,2650.0,2675.21,137230.0342
|
|
||||||
2024-09-29,2675.21,2683.7,2629.73,2657.62,160554.3146
|
|
||||||
2024-09-30,2657.62,2663.5,2575.0,2602.23,282475.7961
|
|
||||||
2024-10-01,2602.24,2659.0,2414.0,2447.79,502828.6804
|
|
||||||
2024-10-02,2447.78,2499.0,2352.0,2364.1,317484.735
|
|
||||||
2024-10-03,2364.09,2403.38,2310.0,2349.8,314635.9263
|
|
||||||
2024-10-04,2349.8,2441.64,2339.15,2414.41,239383.017
|
|
||||||
2024-10-05,2414.41,2428.23,2390.05,2414.66,103740.2316
|
|
||||||
2024-10-06,2414.66,2457.8,2407.0,2440.03,105752.7096
|
|
||||||
2024-10-07,2440.02,2521.0,2403.0,2422.71,300459.5407
|
|
||||||
2024-10-08,2422.7,2466.66,2401.18,2440.89,201358.3312
|
|
||||||
2024-10-09,2440.88,2473.61,2351.42,2370.47,242082.2725
|
|
||||||
2024-10-10,2370.47,2421.36,2330.66,2386.49,257862.9619
|
|
||||||
2024-10-11,2386.49,2471.45,2381.86,2439.5,235441.2197
|
|
||||||
2024-10-12,2439.49,2490.51,2434.35,2476.4,139637.5062
|
|
||||||
2024-10-13,2476.4,2484.92,2436.4,2468.91,120022.8179
|
|
||||||
2024-10-14,2468.92,2654.0,2443.39,2629.79,435900.1619
|
|
||||||
2024-10-15,2629.79,2688.6,2537.36,2607.41,416285.6663
|
|
||||||
2024-10-16,2607.41,2647.79,2588.67,2611.1,260322.4242
|
|
||||||
2024-10-17,2611.1,2648.37,2575.4,2605.8,221624.5434
|
|
||||||
2024-10-18,2605.79,2675.58,2596.49,2642.17,292444.1153
|
|
||||||
2024-10-19,2642.17,2663.49,2631.02,2648.2,105413.7646
|
|
||||||
2024-10-20,2648.2,2759.0,2635.54,2746.91,256910.7448
|
|
||||||
2024-10-21,2746.91,2769.48,2655.01,2666.7,310674.7232
|
|
||||||
2024-10-22,2666.71,2671.92,2606.56,2622.81,265747.713
|
|
||||||
2024-10-23,2622.81,2628.2,2450.0,2524.61,352647.7086
|
|
||||||
2024-10-24,2524.6,2562.65,2507.31,2535.82,290851.7842
|
|
||||||
2024-10-25,2535.82,2566.33,2382.59,2440.62,514799.8822
|
|
||||||
2024-10-26,2440.63,2508.0,2430.12,2482.51,248724.8313
|
|
||||||
2024-10-27,2482.51,2527.99,2464.13,2507.8,137580.8836
|
|
||||||
2024-10-28,2507.8,2589.67,2471.67,2567.48,360605.327
|
|
||||||
2024-10-29,2567.49,2681.86,2561.2,2638.8,451915.7025
|
|
||||||
2024-10-30,2638.8,2722.3,2599.66,2659.19,460627.126
|
|
||||||
2024-10-31,2659.19,2669.0,2503.0,2518.61,403427.2017
|
|
||||||
2024-11-01,2518.61,2586.8,2467.67,2511.49,430249.1073
|
|
||||||
2024-11-02,2511.49,2523.45,2470.0,2494.23,145961.13
|
|
||||||
2024-11-03,2494.23,2496.39,2411.0,2457.73,276009.1197
|
|
||||||
2024-11-04,2457.73,2491.39,2357.59,2398.21,306316.9645
|
|
||||||
2024-11-05,2398.21,2480.0,2380.74,2422.55,331619.2545
|
|
||||||
2024-11-06,2422.6,2744.7,2420.3,2721.87,1002141.3618
|
|
||||||
2024-11-07,2721.88,2916.11,2699.49,2895.47,730845.9524
|
|
||||||
2024-11-08,2895.47,2981.69,2886.4,2961.75,568864.435
|
|
||||||
2024-11-09,2961.75,3157.4,2953.71,3126.21,526719.4212
|
|
||||||
2024-11-10,3126.2,3248.52,3069.0,3183.21,865536.7322
|
|
||||||
2024-11-11,3183.2,3387.61,3105.0,3371.59,1000093.9664
|
|
||||||
2024-11-12,3371.59,3442.5,3207.67,3243.8,973215.7285
|
|
||||||
2024-11-13,3243.79,3331.0,3116.69,3187.16,919686.3747
|
|
||||||
2024-11-14,3187.16,3240.4,3028.56,3058.82,605780.6662
|
|
||||||
2024-11-15,3058.81,3131.06,3014.5,3090.01,519623.6317
|
|
||||||
2024-11-16,3090.01,3219.97,3072.0,3132.87,423639.1573
|
|
||||||
2024-11-17,3132.88,3162.11,3034.99,3076.0,449818.9575
|
|
||||||
2024-11-18,3075.99,3224.94,3050.01,3207.8,640658.4357
|
|
||||||
2024-11-19,3207.81,3221.2,3065.4,3107.44,500112.0194
|
|
||||||
2024-11-20,3107.45,3159.2,3029.41,3069.97,503607.8336
|
|
||||||
2024-11-21,3069.97,3386.73,3032.59,3355.81,974923.3035
|
|
||||||
2024-11-22,3355.88,3425.92,3257.54,3327.78,603273.7093
|
|
||||||
2024-11-23,3327.78,3497.51,3312.72,3393.91,721024.0009
|
|
||||||
2024-11-24,3393.91,3450.0,3281.4,3361.2,479098.0921
|
|
||||||
2024-11-25,3361.21,3546.66,3300.01,3414.49,949330.37
|
|
||||||
2024-11-26,3414.49,3462.49,3252.0,3324.73,672367.1503
|
|
||||||
2024-11-27,3324.74,3684.92,3302.4,3653.28,754573.6485
|
|
||||||
2024-11-28,3653.27,3661.92,3529.76,3578.79,560976.425
|
|
||||||
2024-11-29,3578.8,3647.98,3534.28,3592.21,425493.2449
|
|
||||||
2024-11-30,3592.22,3738.98,3568.4,3703.6,503684.7485
|
|
||||||
2024-12-01,3703.59,3746.8,3659.2,3707.61,429089.2633
|
|
||||||
2024-12-02,3707.61,3760.0,3554.32,3643.42,737130.569
|
|
||||||
2024-12-03,3643.43,3670.0,3500.0,3614.51,678400.2032
|
|
||||||
2024-12-04,3614.51,3887.24,3614.51,3837.8,968744.0399
|
|
||||||
2024-12-05,3837.8,3956.0,3677.0,3785.2,946493.276
|
|
||||||
2024-12-06,3785.21,4087.73,3777.26,3998.87,803855.1146
|
|
||||||
2024-12-07,3998.87,4024.46,3968.0,3996.22,283920.02
|
|
||||||
2024-12-08,3996.22,4015.58,3923.5,4004.15,251362.1685
|
|
||||||
2024-12-09,4004.15,4006.17,3509.0,3712.0,939642.2001
|
|
||||||
2024-12-10,3712.0,3780.76,3515.89,3628.25,942206.0337
|
|
||||||
2024-12-11,3628.24,3848.64,3562.34,3831.81,461818.7801
|
|
||||||
2024-12-12,3831.82,3987.41,3796.8,3881.61,572098.6343
|
|
||||||
2024-12-13,3881.6,3968.47,3852.94,3906.8,387765.6873
|
|
||||||
2024-12-14,3906.8,3945.0,3825.03,3870.29,265538.9479
|
|
||||||
2024-12-15,3870.3,3974.61,3831.5,3959.09,253833.2141
|
|
||||||
2024-12-16,3959.09,4107.8,3884.0,3986.24,673351.0723
|
|
||||||
2024-12-17,3986.24,4041.82,3847.96,3893.01,450825.9861
|
|
||||||
2024-12-18,3893.01,3907.19,3617.42,3626.8,722505.0618
|
|
||||||
2024-12-19,3626.8,3720.0,3326.8,3417.01,1078153.4851
|
|
||||||
2024-12-20,3417.01,3497.84,3101.9,3472.21,1332783.731
|
|
||||||
2024-12-21,3472.2,3555.18,3293.11,3338.92,475275.82
|
|
||||||
2024-12-22,3338.92,3403.0,3221.1,3281.83,361920.2495
|
|
||||||
2024-12-23,3281.83,3466.99,3216.97,3422.53,488253.41
|
|
||||||
2024-12-24,3422.53,3539.65,3358.19,3493.18,315723.6763
|
|
||||||
2024-12-25,3493.17,3547.95,3440.93,3497.0,219846.0452
|
|
||||||
2024-12-26,3497.0,3514.94,3304.63,3335.05,310170.1719
|
|
||||||
2024-12-27,3335.04,3444.16,3306.6,3333.51,342529.6038
|
|
||||||
2024-12-28,3333.51,3428.68,3322.23,3404.0,175963.613
|
|
||||||
2024-12-29,3404.01,3413.78,3326.17,3356.48,156256.8819
|
|
||||||
2024-12-30,3356.48,3437.59,3305.0,3361.84,420794.6476
|
|
||||||
2024-12-31,3361.83,3451.0,3315.59,3337.78,303053.1685
|
|
||||||
2025-01-01,3337.78,3374.85,3313.88,3360.38,190238.5861
|
|
||||||
2025-01-02,3360.39,3509.99,3354.5,3455.67,338632.8874
|
|
||||||
2025-01-03,3455.68,3630.0,3423.44,3609.01,334041.0604
|
|
||||||
2025-01-04,3609.0,3671.6,3572.42,3656.88,229266.3133
|
|
||||||
2025-01-05,3656.88,3675.25,3593.7,3635.99,155444.1741
|
|
||||||
2025-01-06,3636.0,3744.83,3610.63,3687.45,329642.3648
|
|
||||||
2025-01-07,3687.44,3700.86,3356.31,3381.31,541543.2874
|
|
||||||
2025-01-08,3381.31,3415.1,3208.2,3327.29,584749.9627
|
|
||||||
2025-01-09,3327.29,3357.27,3158.0,3219.2,501818.4247
|
|
||||||
2025-01-10,3219.2,3322.49,3193.97,3267.04,454142.1333
|
|
||||||
2025-01-11,3267.05,3320.18,3217.56,3282.83,151679.7487
|
|
||||||
2025-01-12,3282.83,3300.0,3224.49,3267.3,164879.9709
|
|
||||||
2025-01-13,3267.3,3339.0,2920.0,3137.51,845907.5669
|
|
||||||
2025-01-14,3137.51,3256.67,3125.65,3225.63,374308.4517
|
|
||||||
2025-01-15,3225.63,3473.75,3186.36,3451.52,464188.034
|
|
||||||
2025-01-16,3451.51,3460.79,3265.44,3308.05,471282.6787
|
|
||||||
2025-01-17,3308.04,3525.72,3307.5,3473.63,518550.1813
|
|
||||||
2025-01-18,3473.64,3494.39,3227.0,3307.71,634197.1281
|
|
||||||
2025-01-19,3307.71,3448.99,3130.48,3215.12,1342301.5217
|
|
||||||
2025-01-20,3215.2,3453.69,3142.78,3284.0,1158354.7814
|
|
||||||
2025-01-21,3283.99,3368.0,3204.6,3327.54,548706.0513
|
|
||||||
2025-01-22,3327.55,3365.99,3222.85,3242.6,349240.7498
|
|
||||||
2025-01-23,3242.61,3347.97,3185.0,3338.21,605592.7225
|
|
||||||
2025-01-24,3338.22,3428.0,3275.9,3310.09,451524.2752
|
|
||||||
2025-01-25,3310.1,3350.68,3268.66,3318.77,208559.0415
|
|
||||||
2025-01-26,3318.76,3362.0,3230.0,3232.61,272165.469
|
|
||||||
2025-01-27,3232.61,3253.91,3020.01,3182.44,753004.7529
|
|
||||||
2025-01-28,3182.44,3223.01,3040.03,3077.72,350959.7173
|
|
||||||
2025-01-29,3077.72,3182.52,3054.06,3113.9,409737.2576
|
|
||||||
2025-01-30,3113.9,3283.43,3091.06,3247.39,350639.4823
|
|
||||||
2025-01-31,3247.38,3437.31,3213.8,3300.99,648681.6561
|
|
||||||
2025-02-01,3300.99,3331.98,3101.7,3117.54,354065.2525
|
|
||||||
2025-02-02,3117.54,3163.2,2750.71,2869.68,1050805.93
|
|
||||||
2025-02-03,2869.68,2921.0,2125.01,2879.9,2807979.021
|
|
||||||
2025-02-04,2879.89,2888.5,2632.6,2731.19,1259594.3919
|
|
||||||
2025-02-05,2731.19,2826.95,2699.13,2788.25,686593.0429
|
|
||||||
2025-02-06,2788.25,2857.64,2655.28,2686.64,719459.9571
|
|
||||||
2025-02-07,2686.63,2797.5,2562.51,2622.1,695467.3612
|
|
||||||
2025-02-08,2622.11,2667.3,2588.8,2632.46,379685.1509
|
|
||||||
2025-02-09,2632.46,2698.9,2520.02,2627.18,387166.7911
|
|
||||||
2025-02-10,2627.18,2693.79,2559.85,2661.19,398173.2716
|
|
||||||
2025-02-11,2661.19,2725.04,2558.24,2602.59,484196.1435
|
|
||||||
2025-02-12,2602.59,2795.45,2546.92,2738.27,646295.8959
|
|
||||||
2025-02-13,2738.27,2757.28,2612.76,2675.87,443769.2853
|
|
||||||
2025-02-14,2675.87,2791.78,2664.46,2725.95,419895.7862
|
|
||||||
2025-02-15,2725.95,2739.0,2662.28,2693.04,202971.4295
|
|
||||||
2025-02-16,2693.04,2727.31,2651.26,2661.41,191231.5314
|
|
||||||
2025-02-17,2661.41,2849.7,2637.71,2744.05,639739.8197
|
|
||||||
2025-02-18,2744.05,2756.93,2605.44,2671.99,604863.8737
|
|
||||||
2025-02-19,2672.0,2736.7,2656.03,2715.5,326154.0202
|
|
||||||
2025-02-20,2715.51,2770.59,2707.18,2738.04,336735.2544
|
|
||||||
2025-02-21,2738.04,2845.32,2616.72,2663.0,890440.0604
|
|
||||||
2025-02-22,2663.0,2798.07,2653.33,2763.22,444989.6669
|
|
||||||
2025-02-23,2763.23,2857.34,2745.41,2819.69,611002.1222
|
|
||||||
2025-02-24,2819.7,2839.95,2470.33,2513.52,876876.3707
|
|
||||||
2025-02-25,2513.52,2533.49,2313.49,2495.7,1322016.8718
|
|
||||||
2025-02-26,2495.69,2507.5,2253.77,2336.37,863087.2133
|
|
||||||
2025-02-27,2336.38,2381.6,2230.57,2307.72,637583.9106
|
|
||||||
2025-02-28,2307.72,2314.18,2076.26,2237.59,1291539.0557
|
|
||||||
2025-03-01,2237.59,2281.14,2142.73,2217.39,446430.7271
|
|
||||||
2025-03-02,2217.4,2550.58,2172.04,2518.11,1266684.7748
|
|
||||||
2025-03-03,2518.12,2523.56,2097.91,2149.01,1109565.2003
|
|
||||||
2025-03-04,2149.02,2221.88,1993.2,2171.51,1257822.3372
|
|
||||||
2025-03-05,2171.5,2273.51,2155.03,2241.59,687130.7897
|
|
||||||
2025-03-06,2241.59,2319.99,2176.9,2202.2,596908.3935
|
|
||||||
2025-03-07,2202.21,2258.47,2101.82,2141.6,782387.2606
|
|
||||||
2025-03-08,2141.6,2234.93,2105.06,2203.58,328793.0979
|
|
||||||
2025-03-09,2203.57,2212.0,1989.66,2020.41,601671.5882
|
|
||||||
2025-03-10,2020.43,2152.4,1810.01,1865.1,1248745.3514
|
|
||||||
2025-03-11,1865.11,1963.2,1754.28,1923.43,1171427.9995
|
|
||||||
2025-03-12,1923.42,1960.0,1829.72,1908.2,788739.6763
|
|
||||||
2025-03-13,1908.2,1923.09,1821.81,1864.59,569537.2622
|
|
||||||
2025-03-14,1864.59,1945.64,1861.31,1911.65,375805.7355
|
|
||||||
2025-03-15,1911.64,1957.19,1903.75,1937.17,209545.8675
|
|
||||||
2025-03-16,1937.18,1940.87,1860.38,1887.0,324766.6564
|
|
||||||
2025-03-17,1887.01,1952.4,1879.98,1926.31,334840.4877
|
|
||||||
2025-03-18,1926.3,1930.25,1872.31,1910.02,302389.107
|
|
|
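The caches removed in this commit (the daily file above and the hourly file below) are plain OHLCV CSVs: one row per candle with timestamp, open, high, low, close and volume columns. As a minimal sketch only, assuming pandas is available and using the column header shown in the hourly file, a cache of this shape could be read back as follows; load_candle_cache is a hypothetical helper, not code from this repository:

import pandas as pd

def load_candle_cache(path: str) -> pd.DataFrame:
    """Read an OHLCV candle cache CSV into a timestamp-indexed DataFrame."""
    # Column layout matches the rows shown in the diff:
    # timestamp,open,high,low,close,volume
    df = pd.read_csv(path, parse_dates=["timestamp"])
    df = df.set_index("timestamp").sort_index()
    # Cheap sanity check on the candles: a high can never sit below its low.
    assert (df["high"] >= df["low"]).all(), "corrupt candle rows in cache"
    return df

# Example with the hourly cache deleted in this commit:
# candles = load_candle_cache("cache/ETH_USDT_1h_candles.csv")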
501
cache/ETH_USDT_1h_candles.csv
vendored
501
cache/ETH_USDT_1h_candles.csv
vendored
@ -1,501 +0,0 @@
timestamp,open,high,low,close,volume
2025-02-26 02:00:00,2486.34,2504.68,2476.85,2498.51,15698.2422
… (hourly ETH/USDT OHLCV rows continue, one per hour) …
2025-03-16 21:00:00,1894.87,1898.63,1884.75,1889.57,9899.8103
|
|
||||||
2025-03-16 22:00:00,1889.56,1891.28,1874.24,1874.69,10284.9644
|
|
||||||
2025-03-16 23:00:00,1874.68,1888.88,1870.1,1887.0,8123.7555
|
|
||||||
2025-03-17 00:00:00,1887.01,1906.41,1886.36,1901.65,9805.8701
|
|
||||||
2025-03-17 01:00:00,1901.65,1910.69,1896.67,1903.05,10888.0146
|
|
||||||
2025-03-17 02:00:00,1903.04,1913.6,1901.36,1907.66,9450.8797
|
|
||||||
2025-03-17 03:00:00,1907.66,1915.44,1906.54,1908.85,11178.2284
|
|
||||||
2025-03-17 04:00:00,1908.85,1912.99,1902.61,1904.91,8708.0636
|
|
||||||
2025-03-17 05:00:00,1904.91,1905.12,1893.76,1894.19,9407.8778
|
|
||||||
2025-03-17 06:00:00,1894.18,1899.21,1879.98,1893.46,12935.1791
|
|
||||||
2025-03-17 07:00:00,1893.5,1903.2,1891.65,1900.74,7830.203
|
|
||||||
2025-03-17 08:00:00,1900.75,1905.35,1893.0,1903.73,7470.3373
|
|
||||||
2025-03-17 09:00:00,1903.74,1913.22,1900.56,1912.77,11639.8785
|
|
||||||
2025-03-17 10:00:00,1912.76,1918.42,1908.11,1912.95,11356.277
|
|
||||||
2025-03-17 11:00:00,1912.94,1916.91,1908.0,1910.59,7033.406
|
|
||||||
2025-03-17 12:00:00,1910.58,1939.03,1902.8,1908.28,35770.9257
|
|
||||||
2025-03-17 13:00:00,1908.27,1914.26,1894.0,1898.66,18123.9237
|
|
||||||
2025-03-17 14:00:00,1898.66,1912.85,1888.01,1899.84,26578.3631
|
|
||||||
2025-03-17 15:00:00,1899.85,1922.9,1894.14,1919.47,27274.6945
|
|
||||||
2025-03-17 16:00:00,1919.47,1925.51,1907.74,1914.64,14079.7942
|
|
||||||
2025-03-17 17:00:00,1914.64,1935.46,1912.39,1930.65,14329.1133
|
|
||||||
2025-03-17 18:00:00,1930.65,1951.68,1929.99,1948.56,26391.1296
|
|
||||||
2025-03-17 19:00:00,1948.56,1952.4,1940.48,1942.23,16515.4473
|
|
||||||
2025-03-17 20:00:00,1942.22,1942.92,1931.47,1935.25,12007.0817
|
|
||||||
2025-03-17 21:00:00,1935.26,1949.5,1933.62,1941.26,11043.8693
|
|
||||||
2025-03-17 22:00:00,1941.26,1942.85,1932.05,1932.21,7605.2422
|
|
||||||
2025-03-17 23:00:00,1932.2,1932.49,1922.2,1926.31,7416.688
|
|
||||||
2025-03-18 00:00:00,1926.3,1930.25,1919.16,1922.17,6985.822
|
|
||||||
2025-03-18 01:00:00,1922.17,1924.75,1900.04,1903.55,14555.5309
|
|
||||||
2025-03-18 02:00:00,1903.55,1917.68,1902.96,1911.28,12037.9187
|
|
||||||
2025-03-18 03:00:00,1911.28,1911.44,1897.47,1901.17,18487.4529
|
|
||||||
2025-03-18 04:00:00,1901.17,1908.31,1900.0,1904.17,7237.1359
|
|
||||||
2025-03-18 05:00:00,1904.17,1910.41,1904.17,1907.49,5231.682
|
|
||||||
2025-03-18 06:00:00,1907.49,1907.68,1893.86,1898.33,14784.2542
|
|
||||||
2025-03-18 07:00:00,1898.33,1910.08,1897.15,1905.06,10466.9707
|
|
||||||
2025-03-18 08:00:00,1905.05,1913.09,1901.07,1908.6,14063.5742
|
|
||||||
2025-03-18 09:00:00,1908.61,1912.0,1900.0,1901.26,12082.3419
|
|
||||||
2025-03-18 10:00:00,1901.26,1902.98,1885.58,1895.36,20922.741
|
|
||||||
2025-03-18 11:00:00,1895.36,1898.73,1889.09,1892.43,8533.4138
|
|
||||||
2025-03-18 12:00:00,1892.42,1901.0,1889.38,1895.98,12961.9282
|
|
||||||
2025-03-18 13:00:00,1895.98,1903.2,1872.31,1879.81,31060.1605
|
|
||||||
2025-03-18 14:00:00,1879.82,1897.2,1875.0,1894.87,22835.2268
|
|
||||||
2025-03-18 15:00:00,1894.86,1898.01,1877.59,1881.02,20420.7535
|
|
||||||
2025-03-18 16:00:00,1881.03,1892.08,1876.02,1887.79,23994.0063
|
|
||||||
2025-03-18 17:00:00,1887.79,1889.56,1874.79,1876.78,11327.3757
|
|
||||||
2025-03-18 18:00:00,1876.78,1892.25,1873.6,1890.38,8882.4624
|
|
||||||
2025-03-18 19:00:00,1890.38,1908.39,1888.17,1904.57,12323.8814
|
|
||||||
2025-03-18 20:00:00,1904.57,1910.89,1898.65,1906.56,6373.0862
|
|
||||||
2025-03-18 21:00:00,1906.56,1915.0,1905.72,1910.02,6821.3878
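The candle rows above and the 1m cache removed below share one flat OHLCV CSV layout (timestamp,open,high,low,close,volume). A minimal loading sketch, assuming pandas is available in the project's Python environment; the helper name and example path are illustrative and not part of this commit:

import pandas as pd

def load_candle_csv(path: str) -> pd.DataFrame:
    # Read an OHLCV cache CSV with columns: timestamp,open,high,low,close,volume.
    df = pd.read_csv(path, parse_dates=["timestamp"])
    # Sort and index by time so downstream resampling/plotting sees ordered candles.
    return df.sort_values("timestamp").set_index("timestamp")

# Example (hypothetical usage of a cache file from this repository):
# candles = load_candle_csv("cache/ETH_USDT_1h_candles.csv")
# print(candles[["open", "high", "low", "close", "volume"]].tail())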
501
cache/ETH_USDT_1m_candles.csv
vendored
@@ -1,501 +0,0 @@
timestamp,open,high,low,close,volume
2025-03-18 13:17:00,1894.47,1895.46,1893.43,1893.72,201.88
|
|
||||||
2025-03-18 13:18:00,1893.73,1894.4,1893.65,1894.39,143.5158
|
|
||||||
2025-03-18 13:19:00,1894.4,1897.48,1894.39,1897.47,180.2934
|
|
||||||
2025-03-18 13:20:00,1897.46,1898.66,1897.32,1897.32,380.8197
|
|
||||||
2025-03-18 13:21:00,1897.31,1897.32,1895.97,1896.22,146.91
|
|
||||||
2025-03-18 13:22:00,1896.21,1897.19,1895.89,1896.47,166.6595
|
|
||||||
2025-03-18 13:23:00,1896.46,1897.32,1896.06,1897.25,164.5523
|
|
||||||
2025-03-18 13:24:00,1897.21,1899.78,1896.63,1899.22,375.3178
|
|
||||||
2025-03-18 13:25:00,1899.21,1900.34,1898.9,1899.74,221.3541
|
|
||||||
2025-03-18 13:26:00,1899.75,1901.07,1899.27,1899.83,218.0689
|
|
||||||
2025-03-18 13:27:00,1899.83,1900.29,1898.48,1899.21,154.3351
|
|
||||||
2025-03-18 13:28:00,1899.21,1900.08,1898.32,1898.33,175.2993
|
|
||||||
2025-03-18 13:29:00,1898.33,1900.34,1898.32,1900.33,211.7064
|
|
||||||
2025-03-18 13:30:00,1900.34,1900.73,1897.01,1897.01,408.0837
|
|
||||||
2025-03-18 13:31:00,1897.0,1897.01,1895.0,1895.57,324.3279
|
|
||||||
2025-03-18 13:32:00,1895.57,1897.32,1894.89,1897.32,412.239
|
|
||||||
2025-03-18 13:33:00,1897.32,1903.2,1897.32,1902.53,586.91
|
|
||||||
2025-03-18 13:34:00,1902.53,1902.62,1895.87,1896.67,454.8988
|
|
||||||
2025-03-18 13:35:00,1896.69,1897.02,1891.91,1892.07,1178.8784
|
|
||||||
2025-03-18 13:36:00,1892.06,1892.18,1879.37,1885.68,5498.9763
|
|
||||||
2025-03-18 13:37:00,1885.67,1885.67,1881.19,1883.81,2146.0063
|
|
||||||
2025-03-18 13:38:00,1883.8,1887.44,1883.03,1887.44,719.6235
|
|
||||||
2025-03-18 13:39:00,1887.44,1889.55,1885.35,1885.89,422.8025
|
|
||||||
2025-03-18 13:40:00,1885.88,1885.88,1880.72,1882.49,616.3721
|
|
||||||
2025-03-18 13:41:00,1882.48,1885.0,1879.28,1884.51,1726.6427
|
|
||||||
2025-03-18 13:42:00,1884.51,1887.14,1883.46,1887.0,278.1634
|
|
||||||
2025-03-18 13:43:00,1887.01,1887.38,1883.62,1883.63,451.4124
|
|
||||||
2025-03-18 13:44:00,1883.63,1883.63,1877.39,1878.01,1312.9382
|
|
||||||
2025-03-18 13:45:00,1878.0,1880.4,1872.58,1878.59,2156.2896
|
|
||||||
2025-03-18 13:46:00,1878.62,1878.78,1874.7,1874.77,978.5623
|
|
||||||
2025-03-18 13:47:00,1874.71,1876.31,1872.82,1873.87,896.9869
|
|
||||||
2025-03-18 13:48:00,1873.87,1877.73,1872.31,1875.87,1391.7108
|
|
||||||
2025-03-18 13:49:00,1875.86,1879.89,1875.36,1877.85,538.3726
|
|
||||||
2025-03-18 13:50:00,1877.84,1878.29,1874.93,1875.89,291.5146
|
|
||||||
2025-03-18 13:51:00,1875.78,1878.63,1874.64,1877.28,276.8449
|
|
||||||
2025-03-18 13:52:00,1877.27,1877.82,1875.0,1875.71,335.2429
|
|
||||||
2025-03-18 13:53:00,1875.7,1879.7,1874.16,1879.36,392.7219
|
|
||||||
2025-03-18 13:54:00,1879.46,1881.68,1878.94,1881.06,357.4413
|
|
||||||
2025-03-18 13:55:00,1881.05,1881.05,1877.27,1878.01,362.0632
|
|
||||||
2025-03-18 13:56:00,1878.0,1879.13,1876.52,1878.9,458.0578
|
|
||||||
2025-03-18 13:57:00,1878.91,1878.91,1876.86,1877.76,247.547
|
|
||||||
2025-03-18 13:58:00,1877.76,1879.99,1877.42,1879.73,397.8595
|
|
||||||
2025-03-18 13:59:00,1879.73,1881.88,1879.73,1879.81,528.195
|
|
||||||
2025-03-18 14:00:00,1879.82,1879.82,1877.22,1879.04,555.8057
|
|
||||||
2025-03-18 14:01:00,1879.05,1880.91,1878.24,1880.29,423.4288
|
|
||||||
2025-03-18 14:02:00,1880.14,1880.9,1877.0,1877.0,454.2569
|
|
||||||
2025-03-18 14:03:00,1877.01,1877.78,1875.0,1875.54,371.9196
|
|
||||||
2025-03-18 14:04:00,1875.53,1879.59,1875.0,1879.28,351.3859
|
|
||||||
2025-03-18 14:05:00,1879.28,1880.02,1876.73,1878.61,401.7783
|
|
||||||
2025-03-18 14:06:00,1878.6,1881.21,1877.2,1881.2,509.8663
|
|
||||||
2025-03-18 14:07:00,1881.2,1883.99,1880.92,1883.25,443.0471
|
|
||||||
2025-03-18 14:08:00,1883.24,1884.22,1880.83,1881.23,368.4236
|
|
||||||
2025-03-18 14:09:00,1881.23,1883.01,1879.2,1882.89,327.4358
|
|
||||||
2025-03-18 14:10:00,1882.89,1883.22,1881.11,1881.73,1508.2242
|
|
||||||
2025-03-18 14:11:00,1881.73,1882.91,1879.56,1882.65,480.6439
|
|
||||||
2025-03-18 14:12:00,1882.65,1883.23,1881.45,1882.68,222.6619
|
|
||||||
2025-03-18 14:13:00,1882.68,1883.01,1882.04,1882.77,110.3709
|
|
||||||
2025-03-18 14:14:00,1882.77,1883.23,1882.0,1882.81,149.6505
|
|
||||||
2025-03-18 14:15:00,1882.81,1885.94,1882.8,1885.58,281.2592
|
|
||||||
2025-03-18 14:16:00,1885.58,1886.15,1883.33,1884.94,301.3109
|
|
||||||
2025-03-18 14:17:00,1884.93,1884.93,1881.2,1881.56,263.172
|
|
||||||
2025-03-18 14:18:00,1881.55,1883.07,1880.48,1881.79,314.1385
|
|
||||||
2025-03-18 14:19:00,1881.78,1882.33,1879.49,1881.61,239.6257
|
|
||||||
2025-03-18 14:20:00,1881.61,1885.0,1880.89,1884.03,595.0278
|
|
||||||
2025-03-18 14:21:00,1884.03,1886.25,1883.87,1883.87,292.7776
|
|
||||||
2025-03-18 14:22:00,1883.87,1886.7,1883.87,1886.7,192.5474
|
|
||||||
2025-03-18 14:23:00,1886.73,1887.38,1885.59,1886.52,341.501
|
|
||||||
2025-03-18 14:24:00,1886.51,1887.48,1885.71,1887.37,257.0766
|
|
||||||
2025-03-18 14:25:00,1887.38,1888.02,1886.36,1887.36,375.8195
|
|
||||||
2025-03-18 14:26:00,1887.36,1887.74,1885.35,1887.74,261.4503
|
|
||||||
2025-03-18 14:27:00,1887.83,1889.02,1887.15,1887.15,326.867
|
|
||||||
2025-03-18 14:28:00,1887.15,1888.25,1887.15,1888.25,227.865
|
|
||||||
2025-03-18 14:29:00,1888.24,1889.27,1888.0,1888.39,511.2814
|
|
||||||
2025-03-18 14:30:00,1888.39,1889.27,1886.0,1886.09,255.6139
|
|
||||||
2025-03-18 14:31:00,1886.1,1887.01,1885.71,1886.0,236.9758
|
|
||||||
2025-03-18 14:32:00,1885.99,1887.38,1885.77,1887.21,185.2908
|
|
||||||
2025-03-18 14:33:00,1887.21,1890.41,1887.21,1890.12,351.487
|
|
||||||
2025-03-18 14:34:00,1890.12,1891.18,1889.67,1890.4,330.4416
|
|
||||||
2025-03-18 14:35:00,1890.41,1890.41,1887.93,1889.57,265.96
|
|
||||||
2025-03-18 14:36:00,1889.56,1891.24,1888.77,1890.86,526.9363
|
|
||||||
2025-03-18 14:37:00,1890.86,1891.25,1889.46,1890.4,296.9224
|
|
||||||
2025-03-18 14:38:00,1890.41,1891.95,1890.4,1891.51,239.5668
|
|
||||||
2025-03-18 14:39:00,1891.52,1893.99,1891.2,1893.64,611.8726
|
|
||||||
2025-03-18 14:40:00,1893.64,1896.96,1893.42,1896.45,833.4466
|
|
||||||
2025-03-18 14:41:00,1896.44,1897.2,1894.58,1896.91,382.1487
|
|
||||||
2025-03-18 14:42:00,1896.92,1896.92,1894.44,1894.44,1220.1249
|
|
||||||
2025-03-18 14:43:00,1894.45,1894.45,1891.85,1891.85,454.4165
|
|
||||||
2025-03-18 14:44:00,1891.85,1891.85,1890.9,1890.99,273.7074
|
|
||||||
2025-03-18 14:45:00,1890.98,1892.28,1890.84,1892.14,248.297
|
|
||||||
2025-03-18 14:46:00,1892.14,1895.02,1891.05,1891.05,955.779
|
|
||||||
2025-03-18 14:47:00,1891.04,1891.42,1888.68,1888.87,837.3826
|
|
||||||
2025-03-18 14:48:00,1888.87,1888.91,1886.85,1887.8,352.7373
|
|
||||||
2025-03-18 14:49:00,1887.8,1888.23,1887.27,1887.67,243.6755
|
|
||||||
2025-03-18 14:50:00,1887.67,1890.57,1887.67,1889.9,204.9159
|
|
||||||
2025-03-18 14:51:00,1889.9,1891.42,1889.75,1890.58,238.0843
|
|
||||||
2025-03-18 14:52:00,1890.58,1890.8,1888.1,1888.68,159.4437
|
|
||||||
2025-03-18 14:53:00,1888.69,1888.69,1887.79,1888.48,128.8896
|
|
||||||
2025-03-18 14:54:00,1888.47,1889.74,1888.0,1889.73,99.4452
|
|
||||||
2025-03-18 14:55:00,1889.72,1891.83,1889.35,1891.62,165.2777
|
|
||||||
2025-03-18 14:56:00,1891.61,1892.69,1891.28,1892.43,161.6759
|
|
||||||
2025-03-18 14:57:00,1892.43,1892.43,1890.4,1890.93,232.8887
|
|
||||||
2025-03-18 14:58:00,1890.93,1892.76,1890.27,1892.76,428.905
|
|
||||||
2025-03-18 14:59:00,1892.75,1895.74,1892.75,1894.87,452.2988
|
|
||||||
2025-03-18 15:00:00,1894.86,1894.87,1892.28,1892.47,173.5345
|
|
||||||
2025-03-18 15:01:00,1892.46,1892.46,1887.61,1887.8,515.0224
|
|
||||||
2025-03-18 15:02:00,1887.8,1889.58,1887.7,1889.58,204.7611
|
|
||||||
2025-03-18 15:03:00,1889.58,1890.27,1888.25,1889.23,213.6029
|
|
||||||
2025-03-18 15:04:00,1889.23,1891.28,1889.22,1891.07,231.463
|
|
||||||
2025-03-18 15:05:00,1891.07,1891.28,1889.39,1889.46,148.5623
|
|
||||||
2025-03-18 15:06:00,1889.45,1889.45,1886.98,1887.14,181.1105
|
|
||||||
2025-03-18 15:07:00,1887.15,1888.26,1886.39,1887.21,400.1429
|
|
||||||
2025-03-18 15:08:00,1887.22,1888.83,1886.36,1888.82,422.9228
|
|
||||||
2025-03-18 15:09:00,1888.83,1889.74,1888.01,1888.01,251.6567
|
|
||||||
2025-03-18 15:10:00,1888.02,1888.2,1886.36,1887.3,86.5493
|
|
||||||
2025-03-18 15:11:00,1887.31,1889.15,1886.56,1888.95,145.901
|
|
||||||
2025-03-18 15:12:00,1888.95,1890.64,1888.95,1890.64,195.3096
|
|
||||||
2025-03-18 15:13:00,1890.64,1892.21,1889.52,1889.81,275.9927
|
|
||||||
2025-03-18 15:14:00,1889.8,1891.75,1889.39,1891.21,147.5228
|
|
||||||
2025-03-18 15:15:00,1891.21,1891.82,1890.4,1890.4,118.4237
|
|
||||||
2025-03-18 15:16:00,1890.4,1891.28,1889.61,1890.54,99.3839
|
|
||||||
2025-03-18 15:17:00,1890.55,1890.55,1887.92,1889.07,269.0475
|
|
||||||
2025-03-18 15:18:00,1889.08,1892.25,1889.08,1892.25,400.7508
|
|
||||||
2025-03-18 15:19:00,1892.25,1892.43,1890.86,1891.22,95.5397
|
|
||||||
2025-03-18 15:20:00,1891.22,1892.56,1891.22,1891.42,176.3232
|
|
||||||
2025-03-18 15:21:00,1891.41,1892.85,1891.41,1891.99,241.1644
|
|
||||||
2025-03-18 15:22:00,1891.98,1892.76,1890.41,1890.78,194.7886
|
|
||||||
2025-03-18 15:23:00,1890.78,1891.61,1890.68,1891.61,76.5723
|
|
||||||
2025-03-18 15:24:00,1891.6,1892.0,1889.52,1890.95,107.9965
|
|
||||||
2025-03-18 15:25:00,1890.95,1894.26,1890.95,1893.93,301.428
|
|
||||||
2025-03-18 15:26:00,1893.92,1895.19,1893.92,1894.3,668.2455
|
|
||||||
2025-03-18 15:27:00,1894.3,1895.99,1893.88,1894.95,231.7342
|
|
||||||
2025-03-18 15:28:00,1894.96,1896.0,1894.31,1895.57,236.7206
|
|
||||||
2025-03-18 15:29:00,1895.56,1895.94,1893.28,1893.29,272.2448
|
|
||||||
2025-03-18 15:30:00,1893.29,1894.29,1892.33,1893.12,225.278
|
|
||||||
2025-03-18 15:31:00,1893.12,1893.44,1891.39,1892.35,188.4827
|
|
||||||
2025-03-18 15:32:00,1892.29,1892.43,1891.41,1892.02,273.4488
|
|
||||||
2025-03-18 15:33:00,1892.01,1894.3,1891.62,1894.3,207.2045
|
|
||||||
2025-03-18 15:34:00,1894.3,1895.39,1893.96,1893.97,224.7566
|
|
||||||
2025-03-18 15:35:00,1893.96,1894.29,1892.88,1894.29,269.2245
|
|
||||||
2025-03-18 15:36:00,1894.3,1897.19,1894.29,1896.3,306.3003
|
|
||||||
2025-03-18 15:37:00,1896.31,1896.6,1895.3,1895.4,378.5009
|
|
||||||
2025-03-18 15:38:00,1895.41,1897.32,1895.41,1897.32,181.6041
|
|
||||||
2025-03-18 15:39:00,1897.31,1897.32,1894.79,1896.12,293.024
|
|
||||||
2025-03-18 15:40:00,1896.13,1898.01,1895.07,1895.32,766.5088
|
|
||||||
2025-03-18 15:41:00,1895.32,1895.32,1893.42,1893.9,146.6163
|
|
||||||
2025-03-18 15:42:00,1893.9,1894.78,1892.42,1892.42,252.9175
|
|
||||||
2025-03-18 15:43:00,1892.42,1893.39,1891.91,1892.58,792.4104
|
|
||||||
2025-03-18 15:44:00,1892.57,1892.78,1891.86,1892.12,184.4598
|
|
||||||
2025-03-18 15:45:00,1892.13,1893.82,1892.12,1893.43,249.9519
|
|
||||||
2025-03-18 15:46:00,1893.44,1893.78,1890.53,1890.53,229.3011
|
|
||||||
2025-03-18 15:47:00,1890.53,1890.54,1887.2,1887.73,536.0764
|
|
||||||
2025-03-18 15:48:00,1887.72,1888.38,1887.5,1887.85,184.7191
|
|
||||||
2025-03-18 15:49:00,1887.84,1888.49,1887.05,1887.1,165.97
|
|
||||||
2025-03-18 15:50:00,1887.1,1887.1,1883.85,1883.95,347.974
|
|
||||||
2025-03-18 15:51:00,1883.95,1885.5,1883.01,1884.42,948.4755
|
|
||||||
2025-03-18 15:52:00,1884.43,1885.6,1884.0,1885.23,114.7464
|
|
||||||
2025-03-18 15:53:00,1885.24,1885.28,1883.43,1883.97,81.1754
|
|
||||||
2025-03-18 15:54:00,1883.98,1884.7,1882.0,1883.0,208.6113
|
|
||||||
2025-03-18 15:55:00,1883.01,1884.17,1881.64,1881.75,717.713
|
|
||||||
2025-03-18 15:56:00,1881.75,1882.01,1880.89,1880.89,230.0762
|
|
||||||
2025-03-18 15:57:00,1880.89,1881.11,1879.2,1880.68,2063.4721
|
|
||||||
2025-03-18 15:58:00,1880.69,1881.63,1879.0,1879.98,1560.1975
|
|
||||||
2025-03-18 15:59:00,1880.0,1881.81,1877.59,1881.02,1007.1382
|
|
||||||
2025-03-18 16:00:00,1881.03,1881.63,1879.81,1881.2,259.2258
|
|
||||||
2025-03-18 16:01:00,1881.21,1883.74,1881.21,1883.35,272.1406
|
|
||||||
2025-03-18 16:02:00,1883.35,1884.23,1883.24,1884.22,182.7172
|
|
||||||
2025-03-18 16:03:00,1884.23,1884.35,1882.7,1884.34,189.8824
|
|
||||||
2025-03-18 16:04:00,1884.35,1884.79,1883.55,1883.83,221.4221
|
|
||||||
2025-03-18 16:05:00,1883.83,1885.88,1883.83,1885.86,271.2742
|
|
||||||
2025-03-18 16:06:00,1885.87,1885.87,1883.2,1884.5,274.1512
|
|
||||||
2025-03-18 16:07:00,1884.5,1884.5,1882.45,1882.45,277.6826
|
|
||||||
2025-03-18 16:08:00,1882.45,1882.45,1879.5,1879.77,247.5398
|
|
||||||
2025-03-18 16:09:00,1879.78,1880.33,1879.78,1880.21,186.6317
|
|
||||||
2025-03-18 16:10:00,1880.2,1882.39,1880.2,1882.39,280.3119
|
|
||||||
2025-03-18 16:11:00,1882.39,1884.01,1881.88,1883.9,407.6755
|
|
||||||
2025-03-18 16:12:00,1883.91,1885.23,1883.91,1885.2,313.7331
|
|
||||||
2025-03-18 16:13:00,1885.2,1885.41,1883.78,1883.79,222.3058
|
|
||||||
2025-03-18 16:14:00,1883.78,1886.16,1883.56,1885.97,211.4037
|
|
||||||
2025-03-18 16:15:00,1885.97,1886.58,1882.89,1883.07,782.523
|
|
||||||
2025-03-18 16:16:00,1883.08,1883.81,1882.54,1882.83,68.4737
|
|
||||||
2025-03-18 16:17:00,1882.84,1883.8,1881.52,1881.67,224.2149
|
|
||||||
2025-03-18 16:18:00,1881.67,1883.54,1881.41,1883.23,128.5439
|
|
||||||
2025-03-18 16:19:00,1883.23,1883.7,1880.54,1881.23,280.7795
|
|
||||||
2025-03-18 16:20:00,1881.24,1883.7,1880.41,1883.7,268.4642
|
|
||||||
2025-03-18 16:21:00,1883.7,1885.36,1883.7,1885.05,92.9673
|
|
||||||
2025-03-18 16:22:00,1885.04,1885.68,1883.51,1885.15,133.5352
|
|
||||||
2025-03-18 16:23:00,1885.14,1885.14,1882.24,1882.59,198.6843
|
|
||||||
2025-03-18 16:24:00,1882.59,1882.88,1881.31,1882.88,171.1801
|
|
||||||
2025-03-18 16:25:00,1882.88,1883.33,1882.39,1883.33,100.252
|
|
||||||
2025-03-18 16:26:00,1883.34,1885.95,1883.34,1885.34,323.1068
|
|
||||||
2025-03-18 16:27:00,1885.34,1886.66,1885.34,1886.66,93.618
|
|
||||||
2025-03-18 16:28:00,1886.65,1887.37,1885.58,1886.82,240.5747
|
|
||||||
2025-03-18 16:29:00,1886.82,1889.93,1886.8,1889.5,773.2949
|
|
||||||
2025-03-18 16:30:00,1889.49,1892.08,1889.15,1890.99,1967.4821
|
|
||||||
2025-03-18 16:31:00,1890.99,1891.67,1888.89,1889.35,450.3739
|
|
||||||
2025-03-18 16:32:00,1889.35,1891.73,1889.26,1891.42,149.3304
|
|
||||||
2025-03-18 16:33:00,1891.41,1891.41,1889.0,1889.11,163.4986
|
|
||||||
2025-03-18 16:34:00,1889.11,1889.11,1887.83,1888.38,138.5823
|
|
||||||
2025-03-18 16:35:00,1888.39,1890.06,1888.38,1889.28,131.6046
|
|
||||||
2025-03-18 16:36:00,1889.29,1890.85,1889.29,1889.76,246.8128
|
|
||||||
2025-03-18 16:37:00,1889.76,1890.24,1887.52,1887.53,77.7531
|
|
||||||
2025-03-18 16:38:00,1887.52,1888.0,1886.23,1886.23,150.1942
|
|
||||||
2025-03-18 16:39:00,1886.14,1886.14,1884.71,1885.61,102.3521
|
|
||||||
2025-03-18 16:40:00,1885.61,1885.63,1884.69,1885.62,49.944
|
|
||||||
2025-03-18 16:41:00,1885.63,1885.63,1884.82,1885.4,81.2706
|
|
||||||
2025-03-18 16:42:00,1885.41,1885.41,1883.52,1883.52,96.1382
|
|
||||||
2025-03-18 16:43:00,1883.51,1884.52,1882.32,1882.32,165.2667
|
|
||||||
2025-03-18 16:44:00,1882.31,1882.32,1880.08,1880.45,153.9849
|
|
||||||
2025-03-18 16:45:00,1880.46,1881.83,1879.51,1881.2,480.6408
|
|
||||||
2025-03-18 16:46:00,1881.21,1881.99,1878.77,1878.78,253.1465
|
|
||||||
2025-03-18 16:47:00,1878.78,1879.56,1876.83,1877.01,948.3588
|
|
||||||
2025-03-18 16:48:00,1877.0,1879.55,1876.02,1879.42,1645.345
|
|
||||||
2025-03-18 16:49:00,1879.42,1880.42,1879.19,1879.71,372.2603
|
|
||||||
2025-03-18 16:50:00,1879.7,1879.9,1878.28,1879.3,94.5473
|
|
||||||
2025-03-18 16:51:00,1879.3,1879.3,1877.03,1877.69,465.0663
|
|
||||||
2025-03-18 16:52:00,1877.68,1879.78,1877.6,1879.21,1598.5694
|
|
||||||
2025-03-18 16:53:00,1879.2,1883.32,1879.2,1882.97,1469.9812
|
|
||||||
2025-03-18 16:54:00,1882.98,1886.25,1882.98,1886.01,1886.1856
|
|
||||||
2025-03-18 16:55:00,1886.0,1888.14,1885.8,1887.11,1764.6658
|
|
||||||
2025-03-18 16:56:00,1887.12,1887.53,1885.7,1887.24,493.7718
|
|
||||||
2025-03-18 16:57:00,1887.25,1889.27,1887.25,1888.05,304.3982
|
|
||||||
2025-03-18 16:58:00,1888.04,1888.93,1887.94,1887.97,170.5575
|
|
||||||
2025-03-18 16:59:00,1887.98,1889.49,1887.37,1887.79,223.6132
|
|
||||||
2025-03-18 17:00:00,1887.79,1888.25,1887.11,1887.59,273.1106
|
|
||||||
2025-03-18 17:01:00,1887.6,1887.77,1885.65,1887.17,219.5687
|
|
||||||
2025-03-18 17:02:00,1887.16,1888.63,1887.02,1887.65,161.8751
|
|
||||||
2025-03-18 17:03:00,1887.65,1889.56,1887.65,1889.03,253.1672
|
|
||||||
2025-03-18 17:04:00,1889.02,1889.02,1887.37,1887.37,70.8437
|
|
||||||
2025-03-18 17:05:00,1887.37,1887.37,1884.81,1885.05,130.2218
|
|
||||||
2025-03-18 17:06:00,1885.04,1885.04,1881.53,1881.96,181.2031
|
|
||||||
2025-03-18 17:07:00,1881.96,1882.16,1880.3,1880.31,176.7722
|
|
||||||
2025-03-18 17:08:00,1880.31,1881.58,1880.3,1881.26,162.261
|
|
||||||
2025-03-18 17:09:00,1881.26,1882.22,1881.2,1881.26,158.7651
|
|
||||||
2025-03-18 17:10:00,1881.26,1881.75,1880.43,1881.75,164.0971
|
|
||||||
2025-03-18 17:11:00,1881.75,1884.09,1881.75,1884.08,337.8793
|
|
||||||
2025-03-18 17:12:00,1884.08,1884.61,1883.93,1884.6,78.9549
|
|
||||||
2025-03-18 17:13:00,1884.61,1885.95,1884.6,1885.95,54.9415
|
|
||||||
2025-03-18 17:14:00,1885.95,1885.95,1884.6,1884.61,161.4155
|
|
||||||
2025-03-18 17:15:00,1884.61,1884.67,1883.0,1883.01,79.2387
|
|
||||||
2025-03-18 17:16:00,1883.0,1883.79,1882.32,1882.84,60.6262
|
|
||||||
2025-03-18 17:17:00,1882.84,1884.23,1882.84,1883.62,51.1015
|
|
||||||
2025-03-18 17:18:00,1883.62,1884.06,1881.49,1882.92,106.389
|
|
||||||
2025-03-18 17:19:00,1882.92,1883.43,1878.4,1878.52,2160.3299
|
|
||||||
2025-03-18 17:20:00,1878.52,1879.84,1878.28,1878.29,195.4271
|
|
||||||
2025-03-18 17:21:00,1878.28,1881.32,1878.28,1881.05,202.9947
|
|
||||||
2025-03-18 17:22:00,1881.05,1882.74,1879.92,1879.92,171.7259
|
|
||||||
2025-03-18 17:23:00,1879.93,1880.0,1877.54,1878.9,206.8
|
|
||||||
2025-03-18 17:24:00,1878.9,1880.21,1878.37,1880.2,104.8725
|
|
||||||
2025-03-18 17:25:00,1880.2,1881.76,1880.2,1880.95,148.2309
|
|
||||||
2025-03-18 17:26:00,1880.95,1881.8,1880.38,1880.79,99.0921
|
|
||||||
2025-03-18 17:27:00,1880.78,1882.29,1880.51,1881.2,81.9764
|
|
||||||
2025-03-18 17:28:00,1881.2,1881.76,1879.38,1879.45,109.7147
|
|
||||||
2025-03-18 17:29:00,1879.46,1880.0,1878.91,1879.98,121.7476
|
|
||||||
2025-03-18 17:30:00,1879.98,1879.98,1878.38,1878.67,71.9474
|
|
||||||
2025-03-18 17:31:00,1878.67,1880.85,1878.6,1880.0,117.4228
|
|
||||||
2025-03-18 17:32:00,1880.0,1880.31,1878.18,1878.19,146.8095
|
|
||||||
2025-03-18 17:33:00,1878.18,1878.19,1874.79,1875.5,525.2289
|
|
||||||
2025-03-18 17:34:00,1875.51,1879.0,1875.51,1879.0,148.0866
|
|
||||||
2025-03-18 17:35:00,1879.0,1881.09,1879.0,1880.54,148.9658
|
|
||||||
2025-03-18 17:36:00,1880.54,1882.09,1880.01,1881.52,207.079
|
|
||||||
2025-03-18 17:37:00,1881.52,1882.65,1881.27,1881.41,608.4243
|
|
||||||
2025-03-18 17:38:00,1881.41,1883.34,1881.41,1883.21,188.9361
|
|
||||||
2025-03-18 17:39:00,1883.21,1884.82,1882.41,1884.81,128.4674
|
|
||||||
2025-03-18 17:40:00,1884.82,1885.24,1883.35,1883.91,160.3052
|
|
||||||
2025-03-18 17:41:00,1883.9,1883.9,1882.32,1882.67,132.6583
|
|
||||||
2025-03-18 17:42:00,1882.67,1883.55,1882.32,1882.48,54.3373
|
|
||||||
2025-03-18 17:43:00,1882.47,1882.93,1882.32,1882.91,34.719
|
|
||||||
2025-03-18 17:44:00,1882.91,1882.91,1881.54,1881.71,98.5374
|
|
||||||
2025-03-18 17:45:00,1881.71,1883.19,1881.5,1882.97,61.6772
|
|
||||||
2025-03-18 17:46:00,1882.98,1882.98,1880.2,1880.3,135.0792
|
|
||||||
2025-03-18 17:47:00,1880.31,1880.63,1877.73,1877.87,151.2017
|
|
||||||
2025-03-18 17:48:00,1877.88,1879.58,1877.43,1879.19,229.6866
|
|
||||||
2025-03-18 17:49:00,1879.2,1879.2,1878.18,1878.4,46.7495
|
|
||||||
2025-03-18 17:50:00,1878.41,1880.0,1878.28,1880.0,137.2735
|
|
||||||
2025-03-18 17:51:00,1880.0,1880.42,1879.69,1880.18,108.6852
|
|
||||||
2025-03-18 17:52:00,1880.19,1881.13,1879.3,1879.34,66.1456
|
|
||||||
2025-03-18 17:53:00,1879.34,1880.91,1879.32,1879.53,85.9544
|
|
||||||
2025-03-18 17:54:00,1879.53,1879.92,1878.13,1878.13,136.4773
|
|
||||||
2025-03-18 17:55:00,1878.06,1878.06,1876.26,1876.76,255.1371
|
|
||||||
2025-03-18 17:56:00,1876.76,1876.76,1875.25,1876.63,135.3766
|
|
||||||
2025-03-18 17:57:00,1876.64,1877.57,1876.26,1876.94,117.2297
|
|
||||||
2025-03-18 17:58:00,1876.94,1879.75,1876.94,1879.75,137.0594
|
|
||||||
2025-03-18 17:59:00,1879.75,1879.75,1875.82,1876.78,266.3737
|
|
||||||
2025-03-18 18:00:00,1876.78,1878.7,1876.39,1878.24,112.3712
|
|
||||||
2025-03-18 18:01:00,1878.22,1878.22,1876.26,1876.26,163.7698
|
|
||||||
2025-03-18 18:02:00,1876.27,1878.93,1876.27,1878.58,96.4189
|
|
||||||
2025-03-18 18:03:00,1878.58,1878.73,1876.26,1876.35,116.5991
|
|
||||||
2025-03-18 18:04:00,1876.34,1876.34,1873.66,1874.74,328.7024
|
|
||||||
2025-03-18 18:05:00,1874.74,1877.17,1873.6,1877.17,180.6999
|
|
||||||
2025-03-18 18:06:00,1877.17,1879.14,1876.82,1878.46,283.6212
|
|
||||||
2025-03-18 18:07:00,1878.46,1878.97,1877.18,1877.19,195.5127
|
|
||||||
2025-03-18 18:08:00,1877.18,1878.87,1877.1,1878.87,176.6051
|
|
||||||
2025-03-18 18:09:00,1878.86,1879.86,1878.86,1879.86,92.1913
|
|
||||||
2025-03-18 18:10:00,1879.86,1879.86,1876.81,1878.19,105.6934
|
|
||||||
2025-03-18 18:11:00,1878.18,1878.19,1877.18,1877.32,71.4273
|
|
||||||
2025-03-18 18:12:00,1877.32,1879.2,1876.89,1879.19,229.0516
|
|
||||||
2025-03-18 18:13:00,1879.2,1880.65,1878.49,1880.65,89.3007
|
|
||||||
2025-03-18 18:14:00,1880.65,1881.21,1879.69,1880.8,142.0351
|
|
||||||
2025-03-18 18:15:00,1880.8,1880.99,1879.4,1880.59,338.9413
|
|
||||||
2025-03-18 18:16:00,1880.59,1881.92,1880.58,1881.21,71.8469
|
|
||||||
2025-03-18 18:17:00,1881.21,1881.91,1881.01,1881.12,87.0399
|
|
||||||
2025-03-18 18:18:00,1881.11,1882.33,1881.0,1882.33,128.2447
|
|
||||||
2025-03-18 18:19:00,1882.33,1882.89,1881.32,1882.58,72.6358
|
|
||||||
2025-03-18 18:20:00,1882.58,1883.09,1882.21,1883.09,79.7689
|
|
||||||
2025-03-18 18:21:00,1883.09,1884.17,1881.54,1883.7,160.3901
|
|
||||||
2025-03-18 18:22:00,1883.71,1884.58,1883.6,1883.7,75.0932
|
|
||||||
2025-03-18 18:23:00,1883.7,1884.72,1883.04,1883.04,64.0117
|
|
||||||
2025-03-18 18:24:00,1883.04,1883.04,1881.2,1882.1,64.2111
|
|
||||||
2025-03-18 18:25:00,1882.1,1883.23,1881.2,1883.22,46.7843
|
|
||||||
2025-03-18 18:26:00,1883.23,1883.46,1882.32,1882.32,71.5642
|
|
||||||
2025-03-18 18:27:00,1882.32,1882.33,1880.66,1880.86,37.399
|
|
||||||
2025-03-18 18:28:00,1880.86,1880.86,1877.6,1877.97,206.6967
|
|
||||||
2025-03-18 18:29:00,1877.97,1877.97,1876.88,1876.88,141.0429
|
|
||||||
2025-03-18 18:30:00,1876.88,1879.2,1876.49,1879.2,67.5536
|
|
||||||
2025-03-18 18:31:00,1879.2,1880.09,1878.28,1878.39,100.4911
|
|
||||||
2025-03-18 18:32:00,1878.39,1878.54,1876.02,1876.03,136.4974
|
|
||||||
2025-03-18 18:33:00,1876.02,1876.4,1875.0,1875.0,119.7464
|
|
||||||
2025-03-18 18:34:00,1875.01,1876.56,1873.64,1876.46,169.564
|
|
||||||
2025-03-18 18:35:00,1876.46,1878.26,1876.46,1878.26,74.705
|
|
||||||
2025-03-18 18:36:00,1878.26,1878.61,1877.65,1878.61,69.4906
|
|
||||||
2025-03-18 18:37:00,1878.61,1879.93,1878.18,1879.89,120.4002
|
|
||||||
2025-03-18 18:38:00,1879.9,1881.43,1879.89,1881.43,145.4075
|
|
||||||
2025-03-18 18:39:00,1881.43,1881.58,1881.03,1881.04,77.8664
|
|
||||||
2025-03-18 18:40:00,1881.04,1881.92,1880.2,1881.48,116.8225
|
|
||||||
2025-03-18 18:41:00,1881.48,1883.01,1881.2,1883.0,135.1433
|
|
||||||
2025-03-18 18:42:00,1883.0,1883.86,1881.08,1881.09,61.5755
|
|
||||||
2025-03-18 18:43:00,1881.09,1881.32,1880.2,1880.3,54.1815
|
|
||||||
2025-03-18 18:44:00,1880.31,1880.86,1880.01,1880.75,56.7077
|
|
||||||
2025-03-18 18:45:00,1880.75,1881.96,1880.3,1881.96,182.9794
|
|
||||||
2025-03-18 18:46:00,1881.95,1884.53,1881.31,1884.53,514.3262
|
|
||||||
2025-03-18 18:47:00,1884.54,1886.67,1884.54,1885.16,219.2121
|
|
||||||
2025-03-18 18:48:00,1885.17,1886.37,1884.8,1884.8,168.7336
|
|
||||||
2025-03-18 18:49:00,1884.81,1887.61,1884.61,1887.55,386.8622
|
|
||||||
2025-03-18 18:50:00,1887.54,1889.46,1887.16,1888.53,734.1086
|
|
||||||
2025-03-18 18:51:00,1888.52,1890.62,1888.52,1890.39,155.0568
|
|
||||||
2025-03-18 18:52:00,1890.38,1892.25,1890.13,1890.87,258.0218
|
|
||||||
2025-03-18 18:53:00,1890.86,1891.69,1889.39,1889.47,152.4864
|
|
||||||
2025-03-18 18:54:00,1889.47,1889.47,1887.52,1888.08,101.422
|
|
||||||
2025-03-18 18:55:00,1888.08,1889.65,1887.63,1889.65,92.7351
|
|
||||||
2025-03-18 18:56:00,1889.65,1889.85,1889.42,1889.51,75.2232
|
|
||||||
2025-03-18 18:57:00,1889.51,1890.4,1888.38,1888.39,109.8622
|
|
||||||
2025-03-18 18:58:00,1888.39,1888.89,1888.25,1888.61,96.9304
|
|
||||||
2025-03-18 18:59:00,1888.6,1890.6,1888.25,1890.38,98.6793
|
|
||||||
2025-03-18 19:00:00,1890.38,1890.9,1889.51,1889.89,68.8263
|
|
||||||
2025-03-18 19:01:00,1889.89,1890.53,1889.59,1889.94,60.8393
|
|
||||||
2025-03-18 19:02:00,1889.94,1891.99,1889.72,1891.5,233.9878
|
|
||||||
2025-03-18 19:03:00,1891.5,1893.12,1891.5,1892.05,124.7598
|
|
||||||
2025-03-18 19:04:00,1892.05,1892.06,1890.74,1890.81,109.3307
|
|
||||||
2025-03-18 19:05:00,1890.82,1891.58,1888.6,1888.61,184.5342
|
|
||||||
2025-03-18 19:06:00,1888.6,1889.4,1888.17,1888.17,87.4092
|
|
||||||
2025-03-18 19:07:00,1888.18,1888.97,1888.18,1888.97,73.6751
|
|
||||||
2025-03-18 19:08:00,1888.98,1890.56,1888.98,1890.56,151.4641
|
|
||||||
2025-03-18 19:09:00,1890.56,1890.9,1890.34,1890.35,71.7682
|
|
||||||
2025-03-18 19:10:00,1890.34,1892.04,1890.34,1892.04,118.5813
|
|
||||||
2025-03-18 19:11:00,1892.04,1892.73,1891.27,1891.55,184.277
|
|
||||||
2025-03-18 19:12:00,1891.55,1892.09,1889.9,1889.9,74.5015
|
|
||||||
2025-03-18 19:13:00,1889.91,1891.05,1889.91,1891.04,63.8816
|
|
||||||
2025-03-18 19:14:00,1891.05,1892.62,1891.05,1892.58,99.772
|
|
||||||
2025-03-18 19:15:00,1892.57,1892.7,1891.63,1892.57,103.6198
|
|
||||||
2025-03-18 19:16:00,1892.58,1894.64,1892.58,1892.64,202.5056
|
|
||||||
2025-03-18 19:17:00,1892.65,1893.06,1891.81,1892.59,102.9337
|
|
||||||
2025-03-18 19:18:00,1892.59,1893.93,1892.11,1892.29,102.5223
|
|
||||||
2025-03-18 19:19:00,1892.29,1894.3,1892.28,1893.61,119.3655
|
|
||||||
2025-03-18 19:20:00,1893.62,1895.46,1893.42,1894.87,168.3367
|
|
||||||
2025-03-18 19:21:00,1894.87,1895.5,1894.44,1894.45,214.4047
|
|
||||||
2025-03-18 19:22:00,1894.44,1895.13,1893.0,1894.35,165.2992
|
|
||||||
2025-03-18 19:23:00,1894.35,1894.63,1893.55,1893.67,96.4527
|
|
||||||
2025-03-18 19:24:00,1893.67,1894.32,1893.67,1894.15,185.0238
|
|
||||||
2025-03-18 19:25:00,1894.16,1894.45,1892.0,1892.01,85.4358
|
|
||||||
2025-03-18 19:26:00,1892.01,1892.39,1891.27,1892.39,85.6997
|
|
||||||
2025-03-18 19:27:00,1892.39,1893.0,1892.13,1892.19,94.4684
|
|
||||||
2025-03-18 19:28:00,1892.18,1893.83,1892.17,1892.43,144.0415
|
|
||||||
2025-03-18 19:29:00,1892.43,1893.44,1892.42,1892.44,202.7154
|
|
||||||
2025-03-18 19:30:00,1892.44,1893.99,1892.44,1893.16,108.0896
|
|
||||||
2025-03-18 19:31:00,1893.17,1896.21,1893.17,1896.21,157.0544
|
|
||||||
2025-03-18 19:32:00,1896.2,1898.99,1896.2,1898.78,345.2592
|
|
||||||
2025-03-18 19:33:00,1898.78,1899.37,1898.32,1898.32,163.4092
|
|
||||||
2025-03-18 19:34:00,1898.33,1898.33,1897.05,1897.27,92.8701
|
|
||||||
2025-03-18 19:35:00,1897.27,1898.24,1896.98,1897.48,73.3609
|
|
||||||
2025-03-18 19:36:00,1897.48,1898.69,1897.15,1897.87,101.6415
|
|
||||||
2025-03-18 19:37:00,1897.87,1898.32,1897.05,1898.0,88.3563
|
|
||||||
2025-03-18 19:38:00,1898.0,1898.16,1897.31,1897.48,53.2163
|
|
||||||
2025-03-18 19:39:00,1897.47,1897.48,1895.7,1895.75,149.9711
|
|
||||||
2025-03-18 19:40:00,1895.76,1897.32,1895.49,1897.32,89.411
|
|
||||||
2025-03-18 19:41:00,1897.32,1898.45,1897.31,1898.45,92.1381
|
|
||||||
2025-03-18 19:42:00,1898.45,1898.66,1897.08,1898.02,70.8204
|
|
||||||
2025-03-18 19:43:00,1898.03,1899.2,1897.83,1898.38,201.5595
|
|
||||||
2025-03-18 19:44:00,1898.38,1898.39,1897.31,1897.95,34.2098
|
|
||||||
2025-03-18 19:45:00,1897.96,1899.59,1897.96,1899.33,131.8136
|
|
||||||
2025-03-18 19:46:00,1899.33,1900.8,1899.04,1899.44,199.8475
|
|
||||||
2025-03-18 19:47:00,1899.44,1901.19,1899.44,1900.91,121.9455
|
|
||||||
2025-03-18 19:48:00,1900.91,1903.18,1900.65,1900.8,532.4673
|
|
||||||
2025-03-18 19:49:00,1900.8,1901.52,1900.22,1901.51,346.4304
|
|
||||||
2025-03-18 19:50:00,1901.51,1904.92,1901.51,1904.09,692.0082
|
|
||||||
2025-03-18 19:51:00,1904.08,1907.58,1903.51,1907.2,516.8693
|
|
||||||
2025-03-18 19:52:00,1907.21,1908.39,1906.62,1908.16,342.4143
|
|
||||||
2025-03-18 19:53:00,1908.16,1908.16,1905.67,1905.77,209.6927
|
|
||||||
2025-03-18 19:54:00,1905.77,1906.28,1903.81,1904.65,2449.0353
|
|
||||||
2025-03-18 19:55:00,1904.66,1905.53,1902.6,1903.4,215.3078
|
|
||||||
2025-03-18 19:56:00,1903.4,1905.7,1903.4,1904.81,376.8713
|
|
||||||
2025-03-18 19:57:00,1904.82,1907.33,1904.81,1906.3,178.0359
|
|
||||||
2025-03-18 19:58:00,1906.31,1908.0,1906.3,1907.01,166.0569
|
|
||||||
2025-03-18 19:59:00,1906.94,1906.94,1903.01,1904.57,243.2851
|
|
||||||
2025-03-18 20:00:00,1904.57,1906.48,1904.5,1905.87,203.6118
|
|
||||||
2025-03-18 20:01:00,1905.87,1906.57,1905.59,1906.05,80.9375
|
|
||||||
2025-03-18 20:02:00,1906.04,1906.05,1904.85,1905.23,65.5725
|
|
||||||
2025-03-18 20:03:00,1905.24,1905.49,1904.36,1904.65,90.7889
|
|
||||||
2025-03-18 20:04:00,1904.64,1906.69,1904.64,1906.69,234.7526
|
|
||||||
2025-03-18 20:05:00,1906.69,1907.56,1906.57,1906.64,147.4473
|
|
||||||
2025-03-18 20:06:00,1906.65,1906.87,1905.36,1906.56,79.5244
|
|
||||||
2025-03-18 20:07:00,1906.56,1907.39,1905.85,1907.01,109.2926
|
|
||||||
2025-03-18 20:08:00,1907.0,1909.7,1907.0,1909.7,407.776
|
|
||||||
2025-03-18 20:09:00,1909.7,1910.89,1908.58,1908.9,437.7785
|
|
||||||
2025-03-18 20:10:00,1908.9,1909.43,1907.38,1907.39,155.8304
|
|
||||||
2025-03-18 20:11:00,1907.39,1907.39,1906.95,1906.95,110.6238
|
|
||||||
2025-03-18 20:12:00,1906.95,1906.96,1905.86,1906.4,117.1242
|
|
||||||
2025-03-18 20:13:00,1906.4,1906.76,1905.78,1905.79,81.0731
|
|
||||||
2025-03-18 20:14:00,1905.79,1906.22,1904.54,1904.54,74.4926
|
|
||||||
2025-03-18 20:15:00,1904.54,1905.9,1904.54,1905.22,110.8886
|
|
||||||
2025-03-18 20:16:00,1905.23,1905.23,1903.45,1903.83,205.3811
|
|
||||||
2025-03-18 20:17:00,1903.84,1904.27,1903.35,1903.35,99.6946
|
|
||||||
2025-03-18 20:18:00,1903.36,1904.55,1903.35,1904.54,93.2532
|
|
||||||
2025-03-18 20:19:00,1904.54,1905.3,1904.41,1905.03,30.686
|
|
||||||
2025-03-18 20:20:00,1905.03,1905.37,1903.85,1903.96,73.5074
|
|
||||||
2025-03-18 20:21:00,1903.97,1904.74,1903.61,1903.87,131.7219
|
|
||||||
2025-03-18 20:22:00,1903.85,1904.55,1903.35,1903.52,70.1069
|
|
||||||
2025-03-18 20:23:00,1903.52,1903.53,1901.35,1901.49,107.1871
|
|
||||||
2025-03-18 20:24:00,1901.49,1902.5,1901.42,1902.31,101.8475
|
|
||||||
2025-03-18 20:25:00,1902.3,1902.94,1902.3,1902.93,70.0326
|
|
||||||
2025-03-18 20:26:00,1902.93,1902.94,1901.55,1902.2,62.7905
|
|
||||||
2025-03-18 20:27:00,1902.21,1902.21,1900.5,1900.5,140.8982
|
|
||||||
2025-03-18 20:28:00,1900.51,1901.51,1900.51,1901.51,59.5334
|
|
||||||
2025-03-18 20:29:00,1901.51,1901.51,1900.5,1900.5,52.2076
|
|
||||||
2025-03-18 20:30:00,1900.5,1901.39,1900.5,1901.02,75.277
|
|
||||||
2025-03-18 20:31:00,1901.03,1901.03,1898.65,1899.29,181.9307
|
|
||||||
2025-03-18 20:32:00,1899.29,1899.65,1899.29,1899.64,52.8868
|
|
||||||
2025-03-18 20:33:00,1899.65,1900.8,1899.64,1900.8,78.9097
|
|
||||||
2025-03-18 20:34:00,1900.8,1900.8,1900.07,1900.07,59.5749
|
|
||||||
2025-03-18 20:35:00,1900.08,1901.76,1900.07,1901.35,150.3715
|
|
||||||
2025-03-18 20:36:00,1901.35,1901.36,1900.0,1900.21,94.8679
|
|
||||||
2025-03-18 20:37:00,1900.21,1901.52,1900.02,1901.46,73.592
|
|
||||||
2025-03-18 20:38:00,1901.46,1902.0,1900.7,1900.75,83.9261
|
|
||||||
2025-03-18 20:39:00,1900.75,1901.88,1900.74,1901.22,92.1371
|
|
||||||
2025-03-18 20:40:00,1901.23,1902.17,1900.89,1901.34,119.7635
|
|
||||||
2025-03-18 20:41:00,1901.35,1902.33,1901.34,1902.08,92.4759
|
|
||||||
2025-03-18 20:42:00,1902.09,1902.79,1902.08,1902.35,86.565
|
|
||||||
2025-03-18 20:43:00,1902.35,1902.53,1902.13,1902.14,48.4733
|
|
||||||
2025-03-18 20:44:00,1902.13,1902.41,1901.08,1901.09,68.3703
|
|
||||||
2025-03-18 20:45:00,1901.08,1901.52,1899.7,1900.14,76.3088
|
|
||||||
2025-03-18 20:46:00,1900.14,1901.16,1900.14,1901.0,60.3394
|
|
||||||
2025-03-18 20:47:00,1901.0,1904.09,1901.0,1903.86,224.0292
|
|
||||||
2025-03-18 20:48:00,1903.85,1904.21,1903.73,1903.73,53.8925
|
|
||||||
2025-03-18 20:49:00,1903.73,1903.97,1903.33,1903.54,71.4889
|
|
||||||
2025-03-18 20:50:00,1903.54,1903.9,1903.34,1903.9,25.1744
|
|
||||||
2025-03-18 20:51:00,1903.9,1903.9,1902.96,1903.15,68.605
|
|
||||||
2025-03-18 20:52:00,1903.14,1904.55,1903.14,1904.54,76.8473
|
|
||||||
2025-03-18 20:53:00,1904.54,1905.84,1904.54,1905.84,77.8222
|
|
||||||
2025-03-18 20:54:00,1905.83,1906.28,1905.79,1905.84,84.7543
|
|
||||||
2025-03-18 20:55:00,1905.84,1905.84,1905.0,1905.46,37.5948
|
|
||||||
2025-03-18 20:56:00,1905.46,1906.0,1904.91,1906.0,58.8875
|
|
||||||
2025-03-18 20:57:00,1906.0,1906.0,1905.12,1905.84,127.7048
|
|
||||||
2025-03-18 20:58:00,1905.84,1906.1,1905.66,1905.92,105.724
|
|
||||||
2025-03-18 20:59:00,1905.93,1906.57,1905.83,1906.56,58.4286
|
|
||||||
2025-03-18 21:00:00,1906.56,1907.0,1905.72,1906.92,80.4943
|
|
||||||
2025-03-18 21:01:00,1906.93,1907.0,1906.64,1906.86,51.1358
|
|
||||||
2025-03-18 21:02:00,1906.86,1907.11,1906.16,1907.11,105.1664
|
|
||||||
2025-03-18 21:03:00,1907.11,1907.68,1907.1,1907.67,69.3371
|
|
||||||
2025-03-18 21:04:00,1907.66,1907.67,1906.37,1906.8,92.0782
|
|
||||||
2025-03-18 21:05:00,1906.8,1907.97,1906.8,1907.97,179.8733
|
|
||||||
2025-03-18 21:06:00,1907.97,1908.39,1907.53,1908.39,44.4455
|
|
||||||
2025-03-18 21:07:00,1908.39,1909.02,1907.91,1908.29,187.4497
|
|
||||||
2025-03-18 21:08:00,1908.29,1909.97,1908.29,1909.76,155.8429
|
|
||||||
2025-03-18 21:09:00,1909.76,1909.76,1909.25,1909.35,96.177
|
|
||||||
2025-03-18 21:10:00,1909.36,1909.49,1908.8,1908.81,154.1635
|
|
||||||
2025-03-18 21:11:00,1908.83,1908.87,1908.26,1908.58,75.4739
|
|
||||||
2025-03-18 21:12:00,1908.59,1911.0,1908.58,1909.92,301.3996
|
|
||||||
2025-03-18 21:13:00,1909.93,1915.0,1909.59,1913.03,2676.8215
|
|
||||||
2025-03-18 21:14:00,1913.03,1914.58,1912.5,1913.43,483.5489
|
|
||||||
2025-03-18 21:15:00,1913.43,1913.43,1912.17,1912.9,62.1588
|
|
||||||
2025-03-18 21:16:00,1912.89,1913.71,1912.62,1912.83,107.7098
|
|
||||||
2025-03-18 21:17:00,1912.84,1912.96,1911.99,1912.31,48.8551
|
|
||||||
2025-03-18 21:18:00,1912.3,1912.63,1912.17,1912.55,56.2907
|
|
||||||
2025-03-18 21:19:00,1912.55,1913.69,1912.41,1913.69,114.7601
|
|
||||||
2025-03-18 21:20:00,1913.68,1914.88,1912.76,1912.99,174.8567
|
|
||||||
2025-03-18 21:21:00,1912.99,1913.13,1912.79,1913.12,79.5759
|
|
||||||
2025-03-18 21:22:00,1913.12,1913.12,1912.41,1912.41,76.2874
|
|
||||||
2025-03-18 21:23:00,1912.41,1912.42,1911.62,1911.63,71.3163
|
|
||||||
2025-03-18 21:24:00,1911.62,1912.0,1911.62,1911.99,68.1546
|
|
||||||
2025-03-18 21:25:00,1912.0,1912.44,1911.77,1911.77,131.132
|
|
||||||
2025-03-18 21:26:00,1911.78,1912.41,1911.77,1912.4,92.154
|
|
||||||
2025-03-18 21:27:00,1912.41,1912.41,1912.16,1912.16,74.0113
|
|
||||||
2025-03-18 21:28:00,1912.16,1912.17,1911.82,1911.83,81.0433
|
|
||||||
2025-03-18 21:29:00,1911.83,1912.2,1911.82,1912.19,74.2521
|
|
||||||
2025-03-18 21:30:00,1912.19,1912.2,1911.82,1911.82,42.4877
|
|
||||||
2025-03-18 21:31:00,1911.83,1912.2,1911.82,1912.16,88.1787
|
|
||||||
2025-03-18 21:32:00,1912.16,1912.17,1911.53,1911.53,193.9941
|
|
||||||
2025-03-18 21:33:00,1911.54,1911.54,1909.39,1910.18,177.425
|
|
||||||
2025-03-18 21:34:00,1910.17,1911.17,1910.17,1911.17,91.9058
|
|
||||||
2025-03-18 21:35:00,1911.17,1911.17,1909.23,1909.39,120.1061
|
|
||||||
2025-03-18 21:36:00,1909.39,1910.02,1909.01,1910.02,41.3081
|
4502
data/btc_usdt_15m.json
Normal file
File diff suppressed because it is too large
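The new data/eth_usdt_1h.json added below stores candles as a JSON array of objects with "timestamp" (epoch milliseconds), a human-readable "datetime", and the OHLCV fields. A minimal parsing sketch under that assumption, again using pandas; the helper name is illustrative, not part of this commit:

import json
import pandas as pd

def load_candle_json(path: str) -> pd.DataFrame:
    # Parse a candle JSON file: a list of {"timestamp", "datetime", "open", "high", "low", "close", "volume"}.
    with open(path) as f:
        records = json.load(f)
    df = pd.DataFrame(records)
    # Treat the millisecond epoch timestamp as canonical; "datetime" is a readable duplicate.
    df["timestamp"] = pd.to_datetime(df["timestamp"], unit="ms")
    return df.set_index("timestamp")[["open", "high", "low", "close", "volume"]]

# Example (path taken from the file added in this commit):
# candles = load_candle_json("data/eth_usdt_1h.json")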
902
data/eth_usdt_1h.json
Normal file
@@ -0,0 +1,902 @@
[
|
||||||
|
{
|
||||||
|
"timestamp": 1743782400000,
|
||||||
|
"datetime": "2025-04-04 19:00:00",
|
||||||
|
"open": 1795.24,
|
||||||
|
"high": 1805.4,
|
||||||
|
"low": 1783.25,
|
||||||
|
"close": 1797.57,
|
||||||
|
"volume": 31964.8878
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743786000000,
|
||||||
|
"datetime": "2025-04-04 20:00:00",
|
||||||
|
"open": 1797.57,
|
||||||
|
"high": 1828.9,
|
||||||
|
"low": 1788.0,
|
||||||
|
"close": 1821.35,
|
||||||
|
"volume": 35573.0452
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743789600000,
|
||||||
|
"datetime": "2025-04-04 21:00:00",
|
||||||
|
"open": 1821.34,
|
||||||
|
"high": 1823.27,
|
||||||
|
"low": 1804.76,
|
||||||
|
"close": 1806.47,
|
||||||
|
"volume": 15351.8715
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743793200000,
|
||||||
|
"datetime": "2025-04-04 22:00:00",
|
||||||
|
"open": 1806.46,
|
||||||
|
"high": 1815.78,
|
||||||
|
"low": 1802.16,
|
||||||
|
"close": 1810.21,
|
||||||
|
"volume": 16894.3278
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743796800000,
|
||||||
|
"datetime": "2025-04-04 23:00:00",
|
||||||
|
"open": 1810.21,
|
||||||
|
"high": 1822.99,
|
||||||
|
"low": 1807.52,
|
||||||
|
"close": 1818.94,
|
||||||
|
"volume": 14972.0714
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743800400000,
|
||||||
|
"datetime": "2025-04-05 00:00:00",
|
||||||
|
"open": 1818.93,
|
||||||
|
"high": 1827.99,
|
||||||
|
"low": 1812.82,
|
||||||
|
"close": 1818.2,
|
||||||
|
"volume": 14130.4212
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743804000000,
|
||||||
|
"datetime": "2025-04-05 01:00:00",
|
||||||
|
"open": 1818.18,
|
||||||
|
"high": 1826.43,
|
||||||
|
"low": 1815.82,
|
||||||
|
"close": 1819.24,
|
||||||
|
"volume": 5753.8435
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743807600000,
|
||||||
|
"datetime": "2025-04-05 02:00:00",
|
||||||
|
"open": 1819.24,
|
||||||
|
"high": 1820.15,
|
||||||
|
"low": 1809.56,
|
||||||
|
"close": 1816.87,
|
||||||
|
"volume": 5836.4157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743811200000,
|
||||||
|
"datetime": "2025-04-05 03:00:00",
|
||||||
|
"open": 1816.88,
|
||||||
|
"high": 1825.0,
|
||||||
|
"low": 1812.55,
|
||||||
|
"close": 1822.85,
|
||||||
|
"volume": 6985.5404
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743814800000,
|
||||||
|
"datetime": "2025-04-05 04:00:00",
|
||||||
|
"open": 1822.86,
|
||||||
|
"high": 1827.29,
|
||||||
|
"low": 1819.26,
|
||||||
|
"close": 1819.49,
|
||||||
|
"volume": 8257.7673
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743818400000,
|
||||||
|
"datetime": "2025-04-05 05:00:00",
|
||||||
|
"open": 1819.5,
|
||||||
|
"high": 1821.38,
|
||||||
|
"low": 1812.89,
|
||||||
|
"close": 1815.6,
|
||||||
|
"volume": 4967.4994
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743822000000,
|
||||||
|
"datetime": "2025-04-05 06:00:00",
|
||||||
|
"open": 1815.61,
|
||||||
|
"high": 1816.24,
|
||||||
|
"low": 1809.39,
|
||||||
|
"close": 1810.01,
|
||||||
|
"volume": 4749.6708
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743825600000,
|
||||||
|
"datetime": "2025-04-05 07:00:00",
|
||||||
|
"open": 1810.01,
|
||||||
|
"high": 1815.75,
|
||||||
|
"low": 1807.45,
|
||||||
|
"close": 1808.5,
|
||||||
|
"volume": 4544.9387
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743829200000,
|
||||||
|
"datetime": "2025-04-05 08:00:00",
|
||||||
|
"open": 1808.5,
|
||||||
|
"high": 1815.41,
|
||||||
|
"low": 1807.13,
|
||||||
|
"close": 1810.01,
|
||||||
|
"volume": 3059.9706
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743832800000,
|
||||||
|
"datetime": "2025-04-05 09:00:00",
|
||||||
|
"open": 1810.01,
|
||||||
|
"high": 1813.7,
|
||||||
|
"low": 1803.99,
|
||||||
|
"close": 1809.56,
|
||||||
|
"volume": 7296.6856
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743836400000,
|
||||||
|
"datetime": "2025-04-05 10:00:00",
|
||||||
|
"open": 1809.55,
|
||||||
|
"high": 1814.53,
|
||||||
|
"low": 1807.05,
|
||||||
|
"close": 1813.22,
|
||||||
|
"volume": 4620.163
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743840000000,
|
||||||
|
"datetime": "2025-04-05 11:00:00",
|
||||||
|
"open": 1813.22,
|
||||||
|
"high": 1815.23,
|
||||||
|
"low": 1810.0,
|
||||||
|
"close": 1813.83,
|
||||||
|
"volume": 4476.4924
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743843600000,
|
||||||
|
"datetime": "2025-04-05 12:00:00",
|
||||||
|
"open": 1813.82,
|
||||||
|
"high": 1822.04,
|
||||||
|
"low": 1813.82,
|
||||||
|
"close": 1820.78,
|
||||||
|
"volume": 7103.2191
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743847200000,
|
||||||
|
"datetime": "2025-04-05 13:00:00",
|
||||||
|
"open": 1820.79,
|
||||||
|
"high": 1821.57,
|
||||||
|
"low": 1816.33,
|
||||||
|
"close": 1818.12,
|
||||||
|
"volume": 4011.0135
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743850800000,
|
||||||
|
"datetime": "2025-04-05 14:00:00",
|
||||||
|
"open": 1818.13,
|
||||||
|
"high": 1819.39,
|
||||||
|
"low": 1806.56,
|
||||||
|
"close": 1807.34,
|
||||||
|
"volume": 12103.2066
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743854400000,
|
||||||
|
"datetime": "2025-04-05 15:00:00",
|
||||||
|
"open": 1807.34,
|
||||||
|
"high": 1808.3,
|
||||||
|
"low": 1792.63,
|
||||||
|
"close": 1797.88,
|
||||||
|
"volume": 11631.4278
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743858000000,
|
||||||
|
"datetime": "2025-04-05 16:00:00",
|
||||||
|
"open": 1797.88,
|
||||||
|
"high": 1798.62,
|
||||||
|
"low": 1783.24,
|
||||||
|
"close": 1787.04,
|
||||||
|
"volume": 14786.6361
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743861600000,
|
||||||
|
"datetime": "2025-04-05 17:00:00",
|
||||||
|
"open": 1787.03,
|
||||||
|
"high": 1795.23,
|
||||||
|
"low": 1786.39,
|
||||||
|
"close": 1790.8,
|
||||||
|
"volume": 10687.44
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743865200000,
|
||||||
|
"datetime": "2025-04-05 18:00:00",
|
||||||
|
"open": 1790.79,
|
||||||
|
"high": 1790.8,
|
||||||
|
"low": 1777.93,
|
||||||
|
"close": 1782.01,
|
||||||
|
"volume": 20233.6157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743868800000,
|
||||||
|
"datetime": "2025-04-05 19:00:00",
|
||||||
|
"open": 1782.01,
|
||||||
|
"high": 1788.41,
|
||||||
|
"low": 1780.23,
|
||||||
|
"close": 1784.6,
|
||||||
|
"volume": 9400.3796
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743872400000,
|
||||||
|
"datetime": "2025-04-05 20:00:00",
|
||||||
|
"open": 1784.6,
|
||||||
|
"high": 1790.11,
|
||||||
|
"low": 1782.44,
|
||||||
|
"close": 1787.46,
|
||||||
|
"volume": 3547.834
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743876000000,
|
||||||
|
"datetime": "2025-04-05 21:00:00",
|
||||||
|
"open": 1787.45,
|
||||||
|
"high": 1794.51,
|
||||||
|
"low": 1787.08,
|
||||||
|
"close": 1793.21,
|
||||||
|
"volume": 3443.5743
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743879600000,
|
||||||
|
"datetime": "2025-04-05 22:00:00",
|
||||||
|
"open": 1793.21,
|
||||||
|
"high": 1795.38,
|
||||||
|
"low": 1780.0,
|
||||||
|
"close": 1782.17,
|
||||||
|
"volume": 4497.8488
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743883200000,
|
||||||
|
"datetime": "2025-04-05 23:00:00",
|
||||||
|
"open": 1782.17,
|
||||||
|
"high": 1805.06,
|
||||||
|
"low": 1764.39,
|
||||||
|
"close": 1790.6,
|
||||||
|
"volume": 32600.3773
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743886800000,
|
||||||
|
"datetime": "2025-04-06 00:00:00",
|
||||||
|
"open": 1790.61,
|
||||||
|
"high": 1795.23,
|
||||||
|
"low": 1784.76,
|
||||||
|
"close": 1790.3,
|
||||||
|
"volume": 7005.3539
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743890400000,
|
||||||
|
"datetime": "2025-04-06 01:00:00",
|
||||||
|
"open": 1790.3,
|
||||||
|
"high": 1797.81,
|
||||||
|
"low": 1790.3,
|
||||||
|
"close": 1797.31,
|
||||||
|
"volume": 5740.9293
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743894000000,
|
||||||
|
"datetime": "2025-04-06 02:00:00",
|
||||||
|
"open": 1797.31,
|
||||||
|
"high": 1810.04,
|
||||||
|
"low": 1796.8,
|
||||||
|
"close": 1806.01,
|
||||||
|
"volume": 5964.6854
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743897600000,
|
||||||
|
"datetime": "2025-04-06 03:00:00",
|
||||||
|
"open": 1806.02,
|
||||||
|
"high": 1813.91,
|
||||||
|
"low": 1803.06,
|
||||||
|
"close": 1810.45,
|
||||||
|
"volume": 7891.5534
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743901200000,
|
||||||
|
"datetime": "2025-04-06 04:00:00",
|
||||||
|
"open": 1810.45,
|
||||||
|
"high": 1817.0,
|
||||||
|
"low": 1807.29,
|
||||||
|
"close": 1810.5,
|
||||||
|
"volume": 6984.8359
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743904800000,
|
||||||
|
"datetime": "2025-04-06 05:00:00",
|
||||||
|
"open": 1810.5,
|
||||||
|
"high": 1813.53,
|
||||||
|
"low": 1804.3,
|
||||||
|
"close": 1804.3,
|
||||||
|
"volume": 4812.4524
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743908400000,
|
||||||
|
"datetime": "2025-04-06 06:00:00",
|
||||||
|
"open": 1804.3,
|
||||||
|
"high": 1809.81,
|
||||||
|
"low": 1801.78,
|
||||||
|
"close": 1809.81,
|
||||||
|
"volume": 5155.0887
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743912000000,
|
||||||
|
"datetime": "2025-04-06 07:00:00",
|
||||||
|
"open": 1809.81,
|
||||||
|
"high": 1811.21,
|
||||||
|
"low": 1806.06,
|
||||||
|
"close": 1809.72,
|
||||||
|
"volume": 4452.4849
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743915600000,
|
||||||
|
"datetime": "2025-04-06 08:00:00",
|
||||||
|
"open": 1809.73,
|
||||||
|
"high": 1813.55,
|
||||||
|
"low": 1807.0,
|
||||||
|
"close": 1811.18,
|
||||||
|
"volume": 6840.9182
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743919200000,
|
||||||
|
"datetime": "2025-04-06 09:00:00",
|
||||||
|
"open": 1811.19,
|
||||||
|
"high": 1812.6,
|
||||||
|
"low": 1798.54,
|
||||||
|
"close": 1798.55,
|
||||||
|
"volume": 7755.1953
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743922800000,
|
||||||
|
"datetime": "2025-04-06 10:00:00",
|
||||||
|
"open": 1798.55,
|
||||||
|
"high": 1801.59,
|
||||||
|
"low": 1791.49,
|
||||||
|
"close": 1797.69,
|
||||||
|
"volume": 9057.9671
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743926400000,
|
||||||
|
"datetime": "2025-04-06 11:00:00",
|
||||||
|
"open": 1797.7,
|
||||||
|
"high": 1798.2,
|
||||||
|
"low": 1783.0,
|
||||||
|
"close": 1795.39,
|
||||||
|
"volume": 12880.3696
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743930000000,
|
||||||
|
"datetime": "2025-04-06 12:00:00",
|
||||||
|
"open": 1795.38,
|
||||||
|
"high": 1797.01,
|
||||||
|
"low": 1789.49,
|
||||||
|
"close": 1791.08,
|
||||||
|
"volume": 7653.3803
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743933600000,
|
||||||
|
"datetime": "2025-04-06 13:00:00",
|
||||||
|
"open": 1791.08,
|
||||||
|
"high": 1792.28,
|
||||||
|
"low": 1785.1,
|
||||||
|
"close": 1790.1,
|
||||||
|
"volume": 9301.1994
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743937200000,
|
||||||
|
"datetime": "2025-04-06 14:00:00",
|
||||||
|
"open": 1790.1,
|
||||||
|
"high": 1791.38,
|
||||||
|
"low": 1783.75,
|
||||||
|
"close": 1789.19,
|
||||||
|
"volume": 7121.1257
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743940800000,
|
||||||
|
"datetime": "2025-04-06 15:00:00",
|
||||||
|
"open": 1789.19,
|
||||||
|
"high": 1792.09,
|
||||||
|
"low": 1777.04,
|
||||||
|
"close": 1777.98,
|
||||||
|
"volume": 10803.8432
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743944400000,
|
||||||
|
"datetime": "2025-04-06 16:00:00",
|
||||||
|
"open": 1777.98,
|
||||||
|
"high": 1781.0,
|
||||||
|
"low": 1735.0,
|
||||||
|
"close": 1748.54,
|
||||||
|
"volume": 64647.0465
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743948000000,
|
||||||
|
"datetime": "2025-04-06 17:00:00",
|
||||||
|
"open": 1748.54,
|
||||||
|
"high": 1771.0,
|
||||||
|
"low": 1744.85,
|
||||||
|
"close": 1770.02,
|
||||||
|
"volume": 30744.1724
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743951600000,
|
||||||
|
"datetime": "2025-04-06 18:00:00",
|
||||||
|
"open": 1770.02,
|
||||||
|
"high": 1773.78,
|
||||||
|
"low": 1758.61,
|
||||||
|
"close": 1759.55,
|
||||||
|
"volume": 19989.653
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743955200000,
|
||||||
|
"datetime": "2025-04-06 19:00:00",
|
||||||
|
"open": 1759.56,
|
||||||
|
"high": 1760.06,
|
||||||
|
"low": 1717.11,
|
||||||
|
"close": 1731.28,
|
||||||
|
"volume": 33893.8713
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743958800000,
|
||||||
|
"datetime": "2025-04-06 20:00:00",
|
||||||
|
"open": 1731.28,
|
||||||
|
"high": 1734.0,
|
||||||
|
"low": 1669.4,
|
||||||
|
"close": 1684.77,
|
||||||
|
"volume": 98997.6428
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743962400000,
|
||||||
|
"datetime": "2025-04-06 21:00:00",
|
||||||
|
"open": 1684.77,
|
||||||
|
"high": 1684.96,
|
||||||
|
"low": 1602.0,
|
||||||
|
"close": 1618.28,
|
||||||
|
"volume": 168635.5013
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743966000000,
|
||||||
|
"datetime": "2025-04-06 22:00:00",
|
||||||
|
"open": 1618.29,
|
||||||
|
"high": 1635.96,
|
||||||
|
"low": 1608.75,
|
||||||
|
"close": 1626.24,
|
||||||
|
"volume": 93220.5508
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743969600000,
|
||||||
|
"datetime": "2025-04-06 23:00:00",
|
||||||
|
"open": 1626.24,
|
||||||
|
"high": 1631.18,
|
||||||
|
"low": 1555.0,
|
||||||
|
"close": 1574.65,
|
||||||
|
"volume": 118688.9021
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743973200000,
|
||||||
|
"datetime": "2025-04-07 00:00:00",
|
||||||
|
"open": 1574.66,
|
||||||
|
"high": 1597.96,
|
||||||
|
"low": 1571.04,
|
||||||
|
"close": 1587.57,
|
||||||
|
"volume": 41344.627
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743976800000,
|
||||||
|
"datetime": "2025-04-07 01:00:00",
|
||||||
|
"open": 1587.57,
|
||||||
|
"high": 1608.33,
|
||||||
|
"low": 1563.86,
|
||||||
|
"close": 1576.99,
|
||||||
|
"volume": 92388.0761
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743980400000,
|
||||||
|
"datetime": "2025-04-07 02:00:00",
|
||||||
|
"open": 1576.98,
|
||||||
|
"high": 1584.64,
|
||||||
|
"low": 1537.5,
|
||||||
|
"close": 1580.76,
|
||||||
|
"volume": 116586.6384
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743984000000,
|
||||||
|
"datetime": "2025-04-07 03:00:00",
|
||||||
|
"open": 1580.77,
|
||||||
|
"high": 1586.41,
|
||||||
|
"low": 1552.88,
|
||||||
|
"close": 1572.9,
|
||||||
|
"volume": 58777.8057
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743987600000,
|
||||||
|
"datetime": "2025-04-07 04:00:00",
|
||||||
|
"open": 1572.97,
|
||||||
|
"high": 1614.2,
|
||||||
|
"low": 1570.7,
|
||||||
|
"close": 1598.6,
|
||||||
|
"volume": 55450.3954
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743991200000,
|
||||||
|
"datetime": "2025-04-07 05:00:00",
|
||||||
|
"open": 1598.59,
|
||||||
|
"high": 1601.52,
|
||||||
|
"low": 1573.04,
|
||||||
|
"close": 1579.29,
|
||||||
|
"volume": 29148.5226
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743994800000,
|
||||||
|
"datetime": "2025-04-07 06:00:00",
|
||||||
|
"open": 1579.28,
|
||||||
|
"high": 1582.69,
|
||||||
|
"low": 1520.78,
|
||||||
|
"close": 1543.69,
|
||||||
|
"volume": 88093.6637
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1743998400000,
|
||||||
|
"datetime": "2025-04-07 07:00:00",
|
||||||
|
"open": 1543.69,
|
||||||
|
"high": 1559.13,
|
||||||
|
"low": 1534.0,
|
||||||
|
"close": 1548.39,
|
||||||
|
"volume": 56828.5825
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744002000000,
|
||||||
|
"datetime": "2025-04-07 08:00:00",
|
||||||
|
"open": 1548.4,
|
||||||
|
"high": 1561.41,
|
||||||
|
"low": 1536.62,
|
||||||
|
"close": 1540.37,
|
||||||
|
"volume": 55264.3925
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744005600000,
|
||||||
|
"datetime": "2025-04-07 09:00:00",
|
||||||
|
"open": 1540.37,
|
||||||
|
"high": 1552.67,
|
||||||
|
"low": 1411.01,
|
||||||
|
"close": 1431.29,
|
||||||
|
"volume": 335133.7731
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744009200000,
|
||||||
|
"datetime": "2025-04-07 10:00:00",
|
||||||
|
"open": 1431.45,
|
||||||
|
"high": 1477.64,
|
||||||
|
"low": 1431.23,
|
||||||
|
"close": 1460.15,
|
||||||
|
"volume": 159485.0623
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744012800000,
|
||||||
|
"datetime": "2025-04-07 11:00:00",
|
||||||
|
"open": 1460.15,
|
||||||
|
"high": 1511.32,
|
||||||
|
"low": 1455.63,
|
||||||
|
"close": 1495.94,
|
||||||
|
"volume": 111723.8108
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744016400000,
|
||||||
|
"datetime": "2025-04-07 12:00:00",
|
||||||
|
"open": 1495.95,
|
||||||
|
"high": 1505.93,
|
||||||
|
"low": 1476.75,
|
||||||
|
"close": 1484.46,
|
||||||
|
"volume": 56384.7066
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744020000000,
|
||||||
|
"datetime": "2025-04-07 13:00:00",
|
||||||
|
"open": 1484.45,
|
||||||
|
"high": 1520.83,
|
||||||
|
"low": 1481.95,
|
||||||
|
"close": 1491.35,
|
||||||
|
"volume": 79408.5782
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744023600000,
|
||||||
|
"datetime": "2025-04-07 14:00:00",
|
||||||
|
"open": 1491.36,
|
||||||
|
"high": 1503.95,
|
||||||
|
"low": 1474.61,
|
||||||
|
"close": 1483.11,
|
||||||
|
"volume": 41650.7899
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744027200000,
|
||||||
|
"datetime": "2025-04-07 15:00:00",
|
||||||
|
"open": 1483.11,
|
||||||
|
"high": 1526.98,
|
||||||
|
"low": 1478.12,
|
||||||
|
"close": 1521.03,
|
||||||
|
"volume": 91298.0532
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744030800000,
|
||||||
|
"datetime": "2025-04-07 16:00:00",
|
||||||
|
"open": 1521.02,
|
||||||
|
"high": 1558.34,
|
||||||
|
"low": 1487.19,
|
||||||
|
"close": 1550.47,
|
||||||
|
"volume": 140311.7317
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744034400000,
|
||||||
|
"datetime": "2025-04-07 17:00:00",
|
||||||
|
"open": 1550.46,
|
||||||
|
"high": 1639.0,
|
||||||
|
"low": 1539.19,
|
||||||
|
"close": 1569.62,
|
||||||
|
"volume": 403208.1414
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744038000000,
|
||||||
|
"datetime": "2025-04-07 18:00:00",
|
||||||
|
"open": 1569.64,
|
||||||
|
"high": 1577.5,
|
||||||
|
"low": 1533.94,
|
||||||
|
"close": 1556.56,
|
||||||
|
"volume": 111083.087
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744041600000,
|
||||||
|
"datetime": "2025-04-07 19:00:00",
|
||||||
|
"open": 1556.56,
|
||||||
|
"high": 1557.61,
|
||||||
|
"low": 1526.0,
|
||||||
|
"close": 1529.18,
|
||||||
|
"volume": 51859.4908
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744045200000,
|
||||||
|
"datetime": "2025-04-07 20:00:00",
|
||||||
|
"open": 1529.18,
|
||||||
|
"high": 1572.7,
|
||||||
|
"low": 1529.17,
|
||||||
|
"close": 1566.15,
|
||||||
|
"volume": 73082.1237
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744048800000,
|
||||||
|
"datetime": "2025-04-07 21:00:00",
|
||||||
|
"open": 1566.15,
|
||||||
|
"high": 1572.0,
|
||||||
|
"low": 1546.93,
|
||||||
|
"close": 1549.41,
|
||||||
|
"volume": 46023.7822
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744052400000,
|
||||||
|
"datetime": "2025-04-07 22:00:00",
|
||||||
|
"open": 1549.42,
|
||||||
|
"high": 1562.78,
|
||||||
|
"low": 1542.25,
|
||||||
|
"close": 1545.41,
|
||||||
|
"volume": 40539.4252
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744056000000,
|
||||||
|
"datetime": "2025-04-07 23:00:00",
|
||||||
|
"open": 1545.47,
|
||||||
|
"high": 1579.95,
|
||||||
|
"low": 1545.06,
|
||||||
|
"close": 1570.94,
|
||||||
|
"volume": 31675.3445
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744059600000,
|
||||||
|
"datetime": "2025-04-08 00:00:00",
|
||||||
|
"open": 1570.94,
|
||||||
|
"high": 1576.93,
|
||||||
|
"low": 1555.0,
|
||||||
|
"close": 1568.64,
|
||||||
|
"volume": 16065.3977
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744063200000,
|
||||||
|
"datetime": "2025-04-08 01:00:00",
|
||||||
|
"open": 1568.67,
|
||||||
|
"high": 1585.5,
|
||||||
|
"low": 1565.58,
|
||||||
|
"close": 1578.78,
|
||||||
|
"volume": 16816.2641
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744066800000,
|
||||||
|
"datetime": "2025-04-08 02:00:00",
|
||||||
|
"open": 1578.78,
|
||||||
|
"high": 1582.48,
|
||||||
|
"low": 1551.52,
|
||||||
|
"close": 1553.04,
|
||||||
|
"volume": 19483.0819
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744070400000,
|
||||||
|
"datetime": "2025-04-08 03:00:00",
|
||||||
|
"open": 1553.04,
|
||||||
|
"high": 1563.3,
|
||||||
|
"low": 1544.43,
|
||||||
|
"close": 1544.73,
|
||||||
|
"volume": 17942.7729
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744074000000,
|
||||||
|
"datetime": "2025-04-08 04:00:00",
|
||||||
|
"open": 1544.73,
|
||||||
|
"high": 1594.0,
|
||||||
|
"low": 1541.04,
|
||||||
|
"close": 1591.32,
|
||||||
|
"volume": 37588.4409
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744077600000,
|
||||||
|
"datetime": "2025-04-08 05:00:00",
|
||||||
|
"open": 1591.31,
|
||||||
|
"high": 1618.67,
|
||||||
|
"low": 1586.78,
|
||||||
|
"close": 1589.69,
|
||||||
|
"volume": 37899.177
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744081200000,
|
||||||
|
"datetime": "2025-04-08 06:00:00",
|
||||||
|
"open": 1589.7,
|
||||||
|
"high": 1593.0,
|
||||||
|
"low": 1579.36,
|
||||||
|
"close": 1584.71,
|
||||||
|
"volume": 14840.1461
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744084800000,
|
||||||
|
"datetime": "2025-04-08 07:00:00",
|
||||||
|
"open": 1584.71,
|
||||||
|
"high": 1602.27,
|
||||||
|
"low": 1583.84,
|
||||||
|
"close": 1598.79,
|
||||||
|
"volume": 18941.1732
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744088400000,
|
||||||
|
"datetime": "2025-04-08 08:00:00",
|
||||||
|
"open": 1598.78,
|
||||||
|
"high": 1602.22,
|
||||||
|
"low": 1582.08,
|
||||||
|
"close": 1591.83,
|
||||||
|
"volume": 17270.0426
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744092000000,
|
||||||
|
"datetime": "2025-04-08 09:00:00",
|
||||||
|
"open": 1591.82,
|
||||||
|
"high": 1591.82,
|
||||||
|
"low": 1574.34,
|
||||||
|
"close": 1576.25,
|
||||||
|
"volume": 17105.3362
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744095600000,
|
||||||
|
"datetime": "2025-04-08 10:00:00",
|
||||||
|
"open": 1576.28,
|
||||||
|
"high": 1578.55,
|
||||||
|
"low": 1561.89,
|
||||||
|
"close": 1570.83,
|
||||||
|
"volume": 36725.063
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744099200000,
|
||||||
|
"datetime": "2025-04-08 11:00:00",
|
||||||
|
"open": 1570.83,
|
||||||
|
"high": 1571.49,
|
||||||
|
"low": 1554.37,
|
||||||
|
"close": 1566.95,
|
||||||
|
"volume": 18937.8681
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744102800000,
|
||||||
|
"datetime": "2025-04-08 12:00:00",
|
||||||
|
"open": 1566.95,
|
||||||
|
"high": 1575.93,
|
||||||
|
"low": 1564.48,
|
||||||
|
"close": 1568.23,
|
||||||
|
"volume": 16319.6933
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744106400000,
|
||||||
|
"datetime": "2025-04-08 13:00:00",
|
||||||
|
"open": 1568.24,
|
||||||
|
"high": 1574.51,
|
||||||
|
"low": 1565.23,
|
||||||
|
"close": 1566.08,
|
||||||
|
"volume": 14240.1502
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744110000000,
|
||||||
|
"datetime": "2025-04-08 14:00:00",
|
||||||
|
"open": 1566.09,
|
||||||
|
"high": 1590.08,
|
||||||
|
"low": 1564.8,
|
||||||
|
"close": 1588.32,
|
||||||
|
"volume": 30094.5739
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744113600000,
|
||||||
|
"datetime": "2025-04-08 15:00:00",
|
||||||
|
"open": 1588.32,
|
||||||
|
"high": 1593.47,
|
||||||
|
"low": 1567.69,
|
||||||
|
"close": 1571.5,
|
||||||
|
"volume": 33511.7649
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744117200000,
|
||||||
|
"datetime": "2025-04-08 16:00:00",
|
||||||
|
"open": 1571.49,
|
||||||
|
"high": 1586.76,
|
||||||
|
"low": 1550.82,
|
||||||
|
"close": 1568.59,
|
||||||
|
"volume": 61586.1514
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744120800000,
|
||||||
|
"datetime": "2025-04-08 17:00:00",
|
||||||
|
"open": 1568.6,
|
||||||
|
"high": 1583.22,
|
||||||
|
"low": 1530.55,
|
||||||
|
"close": 1531.16,
|
||||||
|
"volume": 55558.0042
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744124400000,
|
||||||
|
"datetime": "2025-04-08 18:00:00",
|
||||||
|
"open": 1531.17,
|
||||||
|
"high": 1539.23,
|
||||||
|
"low": 1518.29,
|
||||||
|
"close": 1531.77,
|
||||||
|
"volume": 44725.2813
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744128000000,
|
||||||
|
"datetime": "2025-04-08 19:00:00",
|
||||||
|
"open": 1531.76,
|
||||||
|
"high": 1533.08,
|
||||||
|
"low": 1482.27,
|
||||||
|
"close": 1486.79,
|
||||||
|
"volume": 70419.8469
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744131600000,
|
||||||
|
"datetime": "2025-04-08 20:00:00",
|
||||||
|
"open": 1486.84,
|
||||||
|
"high": 1489.85,
|
||||||
|
"low": 1454.63,
|
||||||
|
"close": 1481.04,
|
||||||
|
"volume": 115487.7001
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744135200000,
|
||||||
|
"datetime": "2025-04-08 21:00:00",
|
||||||
|
"open": 1481.05,
|
||||||
|
"high": 1500.58,
|
||||||
|
"low": 1471.81,
|
||||||
|
"close": 1475.79,
|
||||||
|
"volume": 58873.5237
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"timestamp": 1744138800000,
|
||||||
|
"datetime": "2025-04-08 22:00:00",
|
||||||
|
"open": 1475.8,
|
||||||
|
"high": 1478.82,
|
||||||
|
"low": 1455.17,
|
||||||
|
"close": 1469.08,
|
||||||
|
"volume": 42842.5499
|
||||||
|
}
|
||||||
|
]
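The records above are plain OHLCV candles: a millisecond epoch timestamp, a human-readable datetime, open/high/low/close prices, and traded volume. A minimal sketch for reading a file with this exact structure into a DataFrame; the file path is a placeholder, not a name taken from this commit:

import json
import pandas as pd

# Placeholder path; only the field names come from the candle records above.
with open("data/eth_usdt_candles.json") as f:
    candles = json.load(f)

df = pd.DataFrame(candles)
# Millisecond epoch timestamps -> pandas datetime index
df["datetime"] = pd.to_datetime(df["timestamp"], unit="ms")
df = df.set_index("datetime").sort_index()
print(df[["open", "high", "low", "close", "volume"]].tail())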
28608
data/eth_usdt_multi.json
Normal file
File diff suppressed because it is too large
2435
dataprovider_realtime.py
Normal file
File diff suppressed because it is too large
@@ -12,20 +12,42 @@ from collections import deque
 
 class ImprovedRewardCalculator:
     def __init__(self,
-                 base_fee_rate=0.001, # 0.1% per transaction
+                 max_drawdown_pct=0.1, # Maximum drawdown %
+                 risk_reward_ratio=1.5, # Risk-reward ratio
+                 base_fee_rate=0.0002, # 0.02% per transaction
                  max_frequency_penalty=0.005, # Maximum 0.5% penalty for frequent trading
                  holding_reward_rate=0.0001, # Small reward for holding profitable positions
-                 risk_adjusted=True): # Use Sharpe ratio for risk adjustment
+                 risk_adjusted=True, # Use Sharpe ratio for risk adjustment
+                 base_reward=1.0, # Base reward scale
+                 profit_factor=2.0, # Profit reward multiplier
+                 loss_factor=1.0, # Loss penalty multiplier
+                 trade_frequency_penalty=0.3, # Penalty for frequent trading
+                 position_duration_factor=0.05 # Reward for longer positions
+                 ):
 
         self.base_fee_rate = base_fee_rate
         self.max_frequency_penalty = max_frequency_penalty
         self.holding_reward_rate = holding_reward_rate
         self.risk_adjusted = risk_adjusted
 
+        # New parameters
+        self.base_reward = base_reward
+        self.profit_factor = profit_factor
+        self.loss_factor = loss_factor
+        self.trade_frequency_penalty = trade_frequency_penalty
+        self.position_duration_factor = position_duration_factor
+
         # Keep track of recent trades
         self.recent_trades = deque(maxlen=1000)
         self.trade_pnls = deque(maxlen=100) # For risk adjustment
 
+        # Additional tracking metrics
+        self.total_trades = 0
+        self.profitable_trades = 0
+        self.total_pnl = 0.0
+        self.daily_pnl = {}
+        self.hourly_pnl = {}
+
     def record_trade(self, timestamp=None, action=None, price=None):
         """Record a trade for frequency tracking"""
         if timestamp is None:
@@ -38,8 +60,30 @@ class ImprovedRewardCalculator:
             })
 
     def record_pnl(self, pnl):
-        """Record a PnL result for risk adjustment"""
+        """Record a PnL result for risk adjustment and tracking metrics"""
         self.trade_pnls.append(pnl)
 
+        # Update overall metrics
+        self.total_trades += 1
+        self.total_pnl += pnl
+
+        if pnl > 0:
+            self.profitable_trades += 1
+
+        # Track daily and hourly PnL
+        now = datetime.now()
+        day_key = now.strftime('%Y-%m-%d')
+        hour_key = now.strftime('%Y-%m-%d %H:00')
+
+        # Update daily PnL
+        if day_key not in self.daily_pnl:
+            self.daily_pnl[day_key] = 0.0
+        self.daily_pnl[day_key] += pnl
+
+        # Update hourly PnL
+        if hour_key not in self.hourly_pnl:
+            self.hourly_pnl[hour_key] = 0.0
+        self.hourly_pnl[hour_key] += pnl
+
     def _calculate_frequency_penalty(self):
         """Calculate penalty for trading too frequently"""
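The hunks above only extend the ImprovedRewardCalculator constructor and the bookkeeping in record_pnl; the reward formula itself is outside this diff. As a rough illustration of how parameters such as base_fee_rate, profit_factor, loss_factor and trade_frequency_penalty could combine, here is a hedged sketch, an assumption for illustration rather than the repository's implementation:

def illustrative_reward(pnl_pct, trades_in_last_hour,
                        base_fee_rate=0.0002, profit_factor=2.0, loss_factor=1.0,
                        trade_frequency_penalty=0.3, max_frequency_penalty=0.005):
    """Illustration only: asymmetric profit/loss scaling, round-trip fees,
    and a capped penalty for over-trading."""
    net = pnl_pct - 2 * base_fee_rate                     # entry + exit fee
    scale = profit_factor if net > 0 else loss_factor     # reward profits more than losses hurt
    penalty = min(trade_frequency_penalty * 0.001 * trades_in_last_hour,
                  max_frequency_penalty)                  # discourage over-trading, capped
    return net * scale - penalty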
358
live_trading.log
@@ -1,358 +0,0 @@
2025-03-17 02:49:17,843 - INFO - Starting live trading demo for ETH/USDT on 1m timeframe
2025-03-17 02:49:17,844 - INFO - Using model: models/trading_agent_best_pnl.pt
2025-03-17 02:49:17,847 - INFO - Exchange initialized with standard CCXT: mexc
2025-03-17 02:49:17,848 - INFO - Fetching initial data for ETH/USDT
2025-03-17 02:49:18,537 - ERROR - Error fetching OHLCV data: mexc {"code":700002,"msg":"Signature for this request is not valid."}
2025-03-17 02:49:18,537 - WARNING - No initial data received
2025-03-17 02:49:18,537 - ERROR - Failed to fetch initial data. Exiting.
2025-03-17 02:50:45,182 - INFO - Starting live trading demo for ETH/USDT on 1m timeframe
2025-03-17 02:50:45,182 - INFO - Using model: models/trading_agent_best_pnl.pt
2025-03-17 02:50:45,182 - INFO - Using mock data for demo mode (no API keys required)
2025-03-17 02:50:45,182 - INFO - Generating mock data for ETH/USDT (1m)
2025-03-17 02:50:45,189 - INFO - Generated 1000 mock candles
2025-03-17 02:50:45,217 - INFO - Using GPU: NVIDIA GeForce RTX 4060 Laptop GPU
2025-03-17 02:50:46,501 - WARNING - Failed to load with weights_only=True: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
WeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.

Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
2025-03-17 02:50:46,566 - WARNING - Failed with safe_globals: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
WeightsUnpickler error: Unsupported global: GLOBAL numpy.dtype was not an allowed global by default. Please use `torch.serialization.add_safe_globals([dtype])` or the `torch.serialization.safe_globals([dtype])` context manager to allowlist this global if you trust this class/function.

Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
2025-03-17 02:50:46,623 - ERROR - Error in live trading: Error(s) in loading state_dict for DQN:
size mismatch for fc1.weight: copying a param with shape torch.Size([384, 40]) from checkpoint, the shape in current model is torch.Size([256, 64]).
size mismatch for fc1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln1.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for lstm.weight_ih_l0: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.weight_hh_l0: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.bias_ih_l0: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for lstm.bias_hh_l0: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for lstm.weight_ih_l1: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.weight_hh_l1: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.bias_ih_l1: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for lstm.bias_hh_l1: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for attention.in_proj_weight: copying a param with shape torch.Size([1152, 384]) from checkpoint, the shape in current model is torch.Size([768, 256]).
size mismatch for attention.in_proj_bias: copying a param with shape torch.Size([1152]) from checkpoint, the shape in current model is torch.Size([768]).
size mismatch for attention.out_proj.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for attention.out_proj.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for fc2.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for fc2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln2.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for fc3.weight: copying a param with shape torch.Size([192, 384]) from checkpoint, the shape in current model is torch.Size([128, 256]).
size mismatch for fc3.bias: copying a param with shape torch.Size([192]) from checkpoint, the shape in current model is torch.Size([128]).
size mismatch for value_stream.weight: copying a param with shape torch.Size([1, 192]) from checkpoint, the shape in current model is torch.Size([1, 128]).
size mismatch for advantage_stream.weight: copying a param with shape torch.Size([4, 192]) from checkpoint, the shape in current model is torch.Size([3, 128]).
size mismatch for advantage_stream.bias: copying a param with shape torch.Size([4]) from checkpoint, the shape in current model is torch.Size([3]).
size mismatch for transformer_encoder.layers.0.self_attn.in_proj_weight: copying a param with shape torch.Size([1152, 384]) from checkpoint, the shape in current model is torch.Size([768, 256]).
size mismatch for transformer_encoder.layers.0.self_attn.in_proj_bias: copying a param with shape torch.Size([1152]) from checkpoint, the shape in current model is torch.Size([768]).
size mismatch for transformer_encoder.layers.0.self_attn.out_proj.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for transformer_encoder.layers.0.self_attn.out_proj.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.linear1.weight: copying a param with shape torch.Size([2048, 384]) from checkpoint, the shape in current model is torch.Size([2048, 256]).
size mismatch for transformer_encoder.layers.0.linear2.weight: copying a param with shape torch.Size([384, 2048]) from checkpoint, the shape in current model is torch.Size([256, 2048]).
size mismatch for transformer_encoder.layers.0.linear2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm1.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm2.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.self_attn.in_proj_weight: copying a param with shape torch.Size([1152, 384]) from checkpoint, the shape in current model is torch.Size([768, 256]).
size mismatch for transformer_encoder.layers.1.self_attn.in_proj_bias: copying a param with shape torch.Size([1152]) from checkpoint, the shape in current model is torch.Size([768]).
size mismatch for transformer_encoder.layers.1.self_attn.out_proj.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for transformer_encoder.layers.1.self_attn.out_proj.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.linear1.weight: copying a param with shape torch.Size([2048, 384]) from checkpoint, the shape in current model is torch.Size([2048, 256]).
size mismatch for transformer_encoder.layers.1.linear2.weight: copying a param with shape torch.Size([384, 2048]) from checkpoint, the shape in current model is torch.Size([256, 2048]).
size mismatch for transformer_encoder.layers.1.linear2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm1.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm2.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
2025-03-17 02:50:46,625 - ERROR - Traceback (most recent call last):
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 236, in run_live_demo
agent.load(args.model)
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\main.py", line 1776, in load
self.policy_net.load_state_dict(checkpoint['policy_net'])
File "C:\Users\popov\miniforge3\Lib\site-packages\torch\nn\modules\module.py", line 2581, in load_state_dict
raise RuntimeError(
RuntimeError: Error(s) in loading state_dict for DQN:
size mismatch for fc1.weight: copying a param with shape torch.Size([384, 40]) from checkpoint, the shape in current model is torch.Size([256, 64]).
size mismatch for fc1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln1.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for lstm.weight_ih_l0: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.weight_hh_l0: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.bias_ih_l0: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for lstm.bias_hh_l0: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for lstm.weight_ih_l1: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.weight_hh_l1: copying a param with shape torch.Size([1536, 384]) from checkpoint, the shape in current model is torch.Size([1024, 256]).
size mismatch for lstm.bias_ih_l1: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for lstm.bias_hh_l1: copying a param with shape torch.Size([1536]) from checkpoint, the shape in current model is torch.Size([1024]).
size mismatch for attention.in_proj_weight: copying a param with shape torch.Size([1152, 384]) from checkpoint, the shape in current model is torch.Size([768, 256]).
size mismatch for attention.in_proj_bias: copying a param with shape torch.Size([1152]) from checkpoint, the shape in current model is torch.Size([768]).
size mismatch for attention.out_proj.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for attention.out_proj.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for fc2.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for fc2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln2.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for ln2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for fc3.weight: copying a param with shape torch.Size([192, 384]) from checkpoint, the shape in current model is torch.Size([128, 256]).
size mismatch for fc3.bias: copying a param with shape torch.Size([192]) from checkpoint, the shape in current model is torch.Size([128]).
size mismatch for value_stream.weight: copying a param with shape torch.Size([1, 192]) from checkpoint, the shape in current model is torch.Size([1, 128]).
size mismatch for advantage_stream.weight: copying a param with shape torch.Size([4, 192]) from checkpoint, the shape in current model is torch.Size([3, 128]).
size mismatch for advantage_stream.bias: copying a param with shape torch.Size([4]) from checkpoint, the shape in current model is torch.Size([3]).
size mismatch for transformer_encoder.layers.0.self_attn.in_proj_weight: copying a param with shape torch.Size([1152, 384]) from checkpoint, the shape in current model is torch.Size([768, 256]).
size mismatch for transformer_encoder.layers.0.self_attn.in_proj_bias: copying a param with shape torch.Size([1152]) from checkpoint, the shape in current model is torch.Size([768]).
size mismatch for transformer_encoder.layers.0.self_attn.out_proj.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for transformer_encoder.layers.0.self_attn.out_proj.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.linear1.weight: copying a param with shape torch.Size([2048, 384]) from checkpoint, the shape in current model is torch.Size([2048, 256]).
size mismatch for transformer_encoder.layers.0.linear2.weight: copying a param with shape torch.Size([384, 2048]) from checkpoint, the shape in current model is torch.Size([256, 2048]).
size mismatch for transformer_encoder.layers.0.linear2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm1.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm2.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.0.norm2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.self_attn.in_proj_weight: copying a param with shape torch.Size([1152, 384]) from checkpoint, the shape in current model is torch.Size([768, 256]).
size mismatch for transformer_encoder.layers.1.self_attn.in_proj_bias: copying a param with shape torch.Size([1152]) from checkpoint, the shape in current model is torch.Size([768]).
size mismatch for transformer_encoder.layers.1.self_attn.out_proj.weight: copying a param with shape torch.Size([384, 384]) from checkpoint, the shape in current model is torch.Size([256, 256]).
size mismatch for transformer_encoder.layers.1.self_attn.out_proj.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.linear1.weight: copying a param with shape torch.Size([2048, 384]) from checkpoint, the shape in current model is torch.Size([2048, 256]).
size mismatch for transformer_encoder.layers.1.linear2.weight: copying a param with shape torch.Size([384, 2048]) from checkpoint, the shape in current model is torch.Size([256, 2048]).
size mismatch for transformer_encoder.layers.1.linear2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm1.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm1.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm2.weight: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).
size mismatch for transformer_encoder.layers.1.norm2.bias: copying a param with shape torch.Size([384]) from checkpoint, the shape in current model is torch.Size([256]).

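The repeated "Failed to load with weights_only=True" warnings above come from the PyTorch 2.6 default change that the message itself describes, and the "'str' object has no attribute '__module__'" failure further down in this log suggests the allow-list was given names as strings rather than the actual objects. A minimal sketch of the loading pattern the error message recommends, for checkpoints you trust; the helper name is hypothetical:

import numpy as np
import torch

def load_trusted_checkpoint(path, device="cpu"):
    """Prefer the safe weights-only load; fall back only for locally produced checkpoints."""
    safe = [np.dtype]  # one of the two globals named in the log
    try:
        from numpy._core.multiarray import scalar  # location named in the log (NumPy 2.x)
    except ImportError:
        from numpy.core.multiarray import scalar   # older NumPy layout
    safe.append(scalar)
    try:
        # Pass the objects themselves, not their names as strings.
        with torch.serialization.safe_globals(safe):
            return torch.load(path, map_location=device, weights_only=True)
    except Exception:
        # Acceptable only because the checkpoint is our own artifact.
        return torch.load(path, map_location=device, weights_only=False)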
2025-03-17 02:52:12,557 - INFO - Starting live trading demo for ETH/USDT on 1m timeframe
|
|
||||||
2025-03-17 02:52:12,558 - INFO - Using model: models/trading_agent_best_pnl.pt
|
|
||||||
2025-03-17 02:52:12,558 - INFO - Using mock data for demo mode (no API keys required)
|
|
||||||
2025-03-17 02:52:12,558 - INFO - Generating mock data for ETH/USDT (1m)
|
|
||||||
2025-03-17 02:52:12,565 - INFO - Generated 1000 mock candles
|
|
||||||
2025-03-17 02:52:12,607 - INFO - Extracted model architecture: state_size=40, action_size=4, hidden_size=384, lstm_layers=2, attention_heads=4
|
|
||||||
2025-03-17 02:52:12,636 - INFO - Using GPU: NVIDIA GeForce RTX 4060 Laptop GPU
|
|
||||||
2025-03-17 02:52:13,909 - WARNING - Failed to load with weights_only=True: Weights only load failed. This file can still be loaded, to do so you have two options, [1mdo those steps only if you trust the source of the checkpoint[0m.
|
|
||||||
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
|
|
||||||
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
|
|
||||||
WeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.
|
|
||||||
|
|
||||||
Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
|
|
||||||
2025-03-17 02:52:13,973 - WARNING - Failed with safe_globals: Weights only load failed. This file can still be loaded, to do so you have two options, [1mdo those steps only if you trust the source of the checkpoint[0m.
|
|
||||||
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
|
|
||||||
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
|
|
||||||
WeightsUnpickler error: Unsupported global: GLOBAL numpy.dtype was not an allowed global by default. Please use `torch.serialization.add_safe_globals([dtype])` or the `torch.serialization.safe_globals([dtype])` context manager to allowlist this global if you trust this class/function.
|
|
||||||
|
|
||||||
Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
|
|
||||||
2025-03-17 02:52:14,032 - INFO - Model loaded from models/trading_agent_best_pnl.pt
|
|
||||||
2025-03-17 02:52:14,032 - INFO - Model loaded successfully
|
|
||||||
2025-03-17 02:52:14,035 - INFO - Starting live trading simulation...
|
|
||||||
2025-03-17 02:52:19,117 - ERROR - Error in live trading loop: not enough values to unpack (expected 4, got 3)
|
|
||||||
2025-03-17 02:52:19,118 - ERROR - Traceback (most recent call last):
|
|
||||||
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 367, in run_live_demo
|
|
||||||
next_state, reward, done, info = env.step(action)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
ValueError: not enough values to unpack (expected 4, got 3)
|
|
||||||
|
|
||||||
2025-03-17 02:52:19,118 - INFO - Continuing after error...
|
|
||||||
2025-03-17 02:52:29,139 - ERROR - Error in live trading loop: not enough values to unpack (expected 4, got 3)
|
|
||||||
2025-03-17 02:52:29,140 - ERROR - Traceback (most recent call last):
|
|
||||||
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 367, in run_live_demo
|
|
||||||
next_state, reward, done, info = env.step(action)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
ValueError: not enough values to unpack (expected 4, got 3)
|
|
||||||
|
|
||||||
2025-03-17 02:52:29,140 - INFO - Continuing after error...
|
|
||||||
2025-03-17 02:52:39,157 - ERROR - Error in live trading loop: not enough values to unpack (expected 4, got 3)
|
|
||||||
2025-03-17 02:52:39,157 - ERROR - Traceback (most recent call last):
|
|
||||||
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 367, in run_live_demo
|
|
||||||
next_state, reward, done, info = env.step(action)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
ValueError: not enough values to unpack (expected 4, got 3)
|
|
||||||
|
|
||||||
2025-03-17 02:52:39,158 - INFO - Continuing after error...
|
|
||||||
2025-03-17 02:52:49,176 - ERROR - Error in live trading loop: not enough values to unpack (expected 4, got 3)
|
|
||||||
2025-03-17 02:52:49,177 - ERROR - Traceback (most recent call last):
|
|
||||||
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 367, in run_live_demo
|
|
||||||
next_state, reward, done, info = env.step(action)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
ValueError: not enough values to unpack (expected 4, got 3)
|
|
||||||
|
|
||||||
2025-03-17 02:52:49,177 - INFO - Continuing after error...
|
|
||||||
2025-03-17 02:52:59,196 - ERROR - Error in live trading loop: not enough values to unpack (expected 4, got 3)
|
|
||||||
2025-03-17 02:52:59,196 - ERROR - Traceback (most recent call last):
|
|
||||||
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 367, in run_live_demo
|
|
||||||
next_state, reward, done, info = env.step(action)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
ValueError: not enough values to unpack (expected 4, got 3)
|
|
||||||
|
|
||||||
2025-03-17 02:52:59,196 - INFO - Continuing after error...
|
|
||||||
2025-03-17 02:53:09,220 - ERROR - Error in live trading loop: not enough values to unpack (expected 4, got 3)
|
|
||||||
2025-03-17 02:53:09,220 - ERROR - Traceback (most recent call last):
|
|
||||||
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 367, in run_live_demo
|
|
||||||
next_state, reward, done, info = env.step(action)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
ValueError: not enough values to unpack (expected 4, got 3)
|
|
||||||
|
|
||||||
2025-03-17 02:53:09,220 - INFO - Continuing after error...
|
|
||||||
2025-03-17 02:53:19,244 - ERROR - Error in live trading loop: not enough values to unpack (expected 4, got 3)
|
|
||||||
2025-03-17 02:53:19,245 - ERROR - Traceback (most recent call last):
|
|
||||||
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 367, in run_live_demo
|
|
||||||
next_state, reward, done, info = env.step(action)
|
|
||||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
|
||||||
ValueError: not enough values to unpack (expected 4, got 3)
|
|
||||||
|
|
||||||
2025-03-17 02:53:19,245 - INFO - Continuing after error...
|
|
||||||
2025-03-17 02:53:53,471 - INFO - Starting live trading demo for ETH/USDT on 1m timeframe
|
|
||||||
2025-03-17 02:53:53,472 - INFO - Using model: models/trading_agent_best_pnl.pt
|
|
||||||
2025-03-17 02:53:53,472 - INFO - Using mock data for demo mode (no API keys required)
|
|
||||||
2025-03-17 02:53:53,472 - INFO - Generating mock data for ETH/USDT (1m)
|
|
||||||
2025-03-17 02:53:53,479 - INFO - Generated 1000 mock candles
|
|
||||||
2025-03-17 02:53:53,520 - INFO - Extracted model architecture: state_size=40, action_size=4, hidden_size=384, lstm_layers=2, attention_heads=4
|
|
||||||
2025-03-17 02:53:53,552 - INFO - Using GPU: NVIDIA GeForce RTX 4060 Laptop GPU
|
|
||||||
2025-03-17 02:53:54,887 - WARNING - Failed to load with weights_only=True: Weights only load failed. This file can still be loaded, to do so you have two options, [1mdo those steps only if you trust the source of the checkpoint[0m.
|
|
||||||
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
|
|
||||||
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
|
|
||||||
WeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.
|
|
||||||
|
|
||||||
Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
|
|
||||||
2025-03-17 02:53:54,958 - WARNING - Failed with safe_globals: Weights only load failed. This file can still be loaded, to do so you have two options, [1mdo those steps only if you trust the source of the checkpoint[0m.
|
|
||||||
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
|
|
||||||
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
|
|
||||||
WeightsUnpickler error: Unsupported global: GLOBAL numpy.dtype was not an allowed global by default. Please use `torch.serialization.add_safe_globals([dtype])` or the `torch.serialization.safe_globals([dtype])` context manager to allowlist this global if you trust this class/function.
|
|
||||||
|
|
||||||
Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
|
|
||||||
2025-03-17 02:53:55,016 - INFO - Model loaded from models/trading_agent_best_pnl.pt
|
|
||||||
2025-03-17 02:53:55,017 - INFO - Model loaded successfully
|
|
||||||
2025-03-17 02:53:55,019 - INFO - Starting live trading simulation...
|
|
||||||
2025-03-17 02:54:24,295 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:54:54,484 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:55:24,631 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:55:54,809 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:56:24,987 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:56:55,157 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:57:25,288 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:57:55,450 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:58:25,571 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:58:55,733 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:59:25,898 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 02:59:55,196 - INFO - Starting live trading demo for ETH/USDT on 1m timeframe
|
|
||||||
2025-03-17 02:59:55,196 - INFO - Using model: models/trading_agent_best_pnl.pt
|
|
||||||
2025-03-17 02:59:55,200 - INFO - Exchange initialized with standard CCXT: mexc
|
|
||||||
2025-03-17 02:59:55,200 - INFO - Fetching initial data for ETH/USDT
|
|
||||||
2025-03-17 02:59:55,844 - ERROR - Error fetching OHLCV data: mexc {"code":700002,"msg":"Signature for this request is not valid."}
|
|
||||||
2025-03-17 02:59:55,844 - WARNING - No initial data received
|
|
||||||
2025-03-17 02:59:55,844 - ERROR - Failed to fetch initial data. Exiting.
|
|
||||||
2025-03-17 02:59:56,090 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:00:26,253 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:00:56,413 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:01:26,591 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:01:56,732 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:02:26,890 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:02:57,233 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:03:27,392 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:03:57,555 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:04:27,713 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:04:57,867 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:05:28,019 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:05:58,171 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:06:28,323 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:06:58,510 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:07:28,695 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:07:58,884 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:08:29,079 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:08:59,295 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:09:29,452 - ERROR - Error creating chart: Line2D.set() got an unexpected keyword argument 'type'
|
|
||||||
2025-03-17 03:40:19,333 - INFO - Starting live trading demo for ETH/USDT on 1m timeframe
|
|
||||||
2025-03-17 03:40:19,333 - INFO - Using model: models/trading_agent_best_pnl.pt
|
|
||||||
2025-03-17 03:40:19,336 - INFO - Exchange initialized with standard CCXT: mexc
|
|
||||||
2025-03-17 03:40:19,336 - INFO - Fetching initial data for ETH/USDT
|
|
||||||
2025-03-17 03:40:24,689 - INFO - Fetched 500 candles for ETH/USDT (1m)
|
|
||||||
2025-03-17 03:40:24,693 - INFO - Initialized environment with 500 candles
|
|
||||||
2025-03-17 03:40:24,728 - INFO - Extracted model architecture: state_size=64, action_size=4, hidden_size=384, lstm_layers=2, attention_heads=4
|
|
||||||
2025-03-17 03:40:24,748 - INFO - Using GPU: NVIDIA GeForce RTX 4060 Laptop GPU
|
|
||||||
2025-03-17 03:40:25,795 - ERROR - Error loading model: Weights only load failed. This file can still be loaded, to do so you have two options, [1mdo those steps only if you trust the source of the checkpoint[0m.
|
|
||||||
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
|
|
||||||
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
|
|
||||||
WeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.

Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
2025-03-17 03:40:25,797 - ERROR - Traceback (most recent call last):
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\main.py", line 1819, in load
checkpoint = torch.load(path, map_location=self.device)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\popov\miniforge3\Lib\site-packages\torch\serialization.py", line 1470, in load
raise pickle.UnpicklingError(_get_wo_message(str(e))) from None
_pickle.UnpicklingError: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
WeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.

Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.

2025-03-17 03:40:25,797 - WARNING - Failed to load model with weights_only=True: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
WeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.

Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.
2025-03-17 03:40:25,860 - ERROR - Error loading model: 'str' object has no attribute '__module__'
2025-03-17 03:40:25,863 - ERROR - Traceback (most recent call last):
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 280, in run_live_demo
agent.load(args.model)
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\main.py", line 1819, in load
checkpoint = torch.load(path, map_location=self.device)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\popov\miniforge3\Lib\site-packages\torch\serialization.py", line 1470, in load
raise pickle.UnpicklingError(_get_wo_message(str(e))) from None
_pickle.UnpicklingError: Weights only load failed. This file can still be loaded, to do so you have two options, do those steps only if you trust the source of the checkpoint.
(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.
(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.
WeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.

Check the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\main.py", line 1819, in load
checkpoint = torch.load(path, map_location=self.device)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\popov\miniforge3\Lib\site-packages\torch\serialization.py", line 1462, in load
return _load(
^^^^^^
File "C:\Users\popov\miniforge3\Lib\site-packages\torch\serialization.py", line 1964, in _load
result = unpickler.load()
^^^^^^^^^^^^^^^^
File "C:\Users\popov\miniforge3\Lib\site-packages\torch\_weights_only_unpickler.py", line 334, in load
elif full_path in _get_user_allowed_globals():
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\popov\miniforge3\Lib\site-packages\torch\_weights_only_unpickler.py", line 144, in _get_user_allowed_globals
module, name = f.__module__, f.__name__
^^^^^^^^^^^^
AttributeError: 'str' object has no attribute '__module__'. Did you mean: '__mod__'?

2025-03-17 03:40:25,863 - WARNING - Failed with safe_globals: 'str' object has no attribute '__module__'
2025-03-17 03:40:25,919 - INFO - Model loaded from models/trading_agent_best_pnl.pt
2025-03-17 03:40:25,920 - INFO - Model loaded successfully
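The traceback above spells out the underlying issue: PyTorch 2.6 defaults `torch.load` to `weights_only=True`, and this checkpoint contains a `numpy._core.multiarray.scalar` global that the weights-only unpickler rejects by default. The secondary `'str' object has no attribute '__module__'` failure is what happens when names are handed to the safe-globals mechanism as strings instead of the actual callables. A minimal loader sketch that follows the error message's own advice (this is not the repo's actual `load()`; the `numpy._core` import path assumes numpy >= 2.0):

import torch

def load_checkpoint(path, device):
    # Allowlist the exact global named in the WeightsUnpickler error and retry the
    # safe (weights_only=True) load. Pass the callable itself, not its name as a
    # string -- strings are what caused the "'str' object has no attribute
    # '__module__'" error above.
    from numpy._core.multiarray import scalar  # numpy >= 2.0 module path (assumption)
    try:
        with torch.serialization.safe_globals([scalar]):
            return torch.load(path, map_location=device)
    except Exception:
        # Fallback only for checkpoints you produced yourself and therefore trust:
        # full unpickling can execute arbitrary code, so never use it on files
        # from untrusted sources.
        return torch.load(path, map_location=device, weights_only=False)

If later errors name additional globals, they can be appended to the same allowlist.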
2025-03-17 03:40:25,925 - INFO - Starting live trading simulation...
2025-03-17 03:40:26,348 - INFO - Fetched 1 candles for ETH/USDT (1m)
2025-03-17 03:40:26,406 - ERROR - Error in live trading loop: too many values to unpack (expected 3)
2025-03-17 03:40:26,406 - ERROR - Traceback (most recent call last):
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 370, in run_live_demo
next_state, reward, done = env.step(action)
^^^^^^^^^^^^^^^^^^^^^^^^
ValueError: too many values to unpack (expected 3)

2025-03-17 03:40:26,406 - INFO - Continuing after error...
2025-03-17 03:40:31,926 - INFO - Fetched 1 candles for ETH/USDT (1m)
2025-03-17 03:40:31,933 - ERROR - Error in live trading loop: too many values to unpack (expected 3)
2025-03-17 03:40:31,933 - ERROR - Traceback (most recent call last):
File "D:\DEV\workspace\REPOS\git.d-popov.com\ai-kevin\crypto\gogo2\run_live_demo.py", line 370, in run_live_demo
next_state, reward, done = env.step(action)
^^^^^^^^^^^^^^^^^^^^^^^^
ValueError: too many values to unpack (expected 3)
2025-03-17 03:40:31,933 - INFO - Continuing after error...
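The repeated `too many values to unpack (expected 3)` errors show that `env.step(action)` returns more values than the three `run_live_demo.py` unpacks. The environment's exact return signature is not visible in this log; as a hedged illustration only, an adapter that tolerates both the classic 4-tuple and a Gymnasium-style 5-tuple could look like this (names are hypothetical):

def step_env(env, action):
    """Unpack env.step() whether it returns a 4-tuple or a 5-tuple."""
    result = env.step(action)
    if len(result) == 5:  # Gymnasium style: obs, reward, terminated, truncated, info
        next_state, reward, terminated, truncated, info = result
        return next_state, reward, terminated or truncated, info
    next_state, reward, done, info = result  # classic style: obs, reward, done, info
    return next_state, reward, done, info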
@ -1,535 +0,0 @@
2025-03-17 00:45:43,111 - INFO - Connected to MEXC WebSocket for BTCUSDT
|
|
||||||
2025-03-17 00:45:43,112 - INFO - Subscribed to BTCUSDT tick data
|
|
||||||
2025-03-17 00:45:44,066 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:45:44,578 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf5 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:45:45,566 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:45:46,573 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:45:46,965 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xcb in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:45:48,226 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:45:56,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8a in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:03,066 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xaf in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:07,079 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8b in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:07,084 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8b in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:07,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8b in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:08,074 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:08,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xab in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:09,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x93 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:10,071 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x87 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:10,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xfb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:11,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:46:11,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:46:12,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8b in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:12,076 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:46:12,571 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8b in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:13,075 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbf in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:13,879 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xe9 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:15,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x83 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:17,066 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xe0 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:17,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:18,095 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd1 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:18,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:19,074 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb0 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:20,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8b in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:21,077 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:21,307 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf0 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:21,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xda in position 62: invalid continuation byte
|
|
||||||
2025-03-17 00:46:22,648 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xab in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:22,719 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xda in position 62: invalid continuation byte
|
|
||||||
2025-03-17 00:46:23,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8c in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:23,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8c in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:24,076 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc1 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:24,577 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xab in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:25,071 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x9f in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:25,588 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:26,070 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x87 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:27,767 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xab in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:29,577 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbd in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:30,076 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb1 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:31,065 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8f in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:31,527 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xdb in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:31,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x84 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:31,643 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xca in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:32,068 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf8 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:32,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xeb in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:33,071 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xdf in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:33,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd4 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:34,065 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc7 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:35,051 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x9b in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:35,565 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa3 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:36,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x97 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:36,575 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x95 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:37,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xff in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:37,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:38,075 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xda in position 62: invalid continuation byte
|
|
||||||
2025-03-17 00:46:38,339 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf5 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:39,081 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8d in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:40,072 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:41,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x93 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:41,808 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x83 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:41,835 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa1 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:45,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb3 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:45,608 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8d in position 60: invalid start byte
|
|
||||||
2025-03-17 00:46:49,288 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:49,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:50,071 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc7 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:50,072 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd1 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:50,227 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xe7 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:46:56,068 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:57,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x93 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:58,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x87 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:46:58,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xfb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:00,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8e in position 60: invalid start byte
|
|
||||||
2025-03-17 00:47:00,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8e in position 60: invalid start byte
|
|
||||||
2025-03-17 00:47:01,066 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbf in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:01,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb3 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:02,066 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:02,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x9b in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:03,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8f in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:03,117 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc1 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:03,566 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x83 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:06,070 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc7 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:06,168 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xac in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:09,065 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x80 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:09,426 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xe9 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:12,256 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf5 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:16,856 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:47:17,573 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb3 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:18,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:19,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x83 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:20,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:20,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x81 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:23,068 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xaf in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:26,048 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:28,429 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa0 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:29,409 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xda in position 62: invalid continuation byte
|
|
||||||
2025-03-17 00:47:33,080 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x90 in position 60: invalid start byte
|
|
||||||
2025-03-17 00:47:34,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:35,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x83 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:35,576 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8d in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:36,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xeb in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:37,553 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbf in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:38,070 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc7 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:39,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa3 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:40,075 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x97 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:42,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xdc in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:43,066 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xcf in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:43,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x91 in position 60: invalid start byte
|
|
||||||
2025-03-17 00:47:44,070 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:45,638 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x91 in position 60: invalid start byte
|
|
||||||
2025-03-17 00:47:46,070 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x87 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:46,566 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xfb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:48,080 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:47:48,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x91 in position 60: invalid start byte
|
|
||||||
2025-03-17 00:47:49,071 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc0 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:49,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb3 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:50,071 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:50,567 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x9b in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:51,077 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x99 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:51,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x83 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:52,073 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:53,067 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xdf in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:54,079 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc7 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:54,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:55,071 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xaf in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:56,577 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x95 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:47:57,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:58,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xdb in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:59,072 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xd0 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:47:59,566 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:48:00,069 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:00,578 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb5 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:01,070 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa0 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:02,073 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x87 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:03,069 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:48:03,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:48:05,074 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbf in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:06,087 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xa7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:06,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x9b in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:07,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x83 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:08,073 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:08,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xec in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:48:09,066 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xdf in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:48:10,571 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xbb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:12,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x8b in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:13,569 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf4 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:48:14,589 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xf0 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:48:15,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xc3 in position 58: invalid continuation byte
|
|
||||||
2025-03-17 00:48:16,070 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xb7 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:16,568 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xab in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:17,570 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x93 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:18,081 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x91 in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:18,566 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0xfb in position 58: invalid start byte
|
|
||||||
2025-03-17 00:48:19,077 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
|
|
||||||
2025-03-17 00:48:19,578 - ERROR - Error processing message: 'utf-8' codec can't decode byte 0x89 in position 61: invalid start byte
2025-03-17 00:48:20,921 - INFO - Cleaned up resources
2025-03-17 00:48:20,923 - INFO - Program interrupted by user
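The long run of `'utf-8' codec can't decode byte ...` errors above suggests the WebSocket handler decodes every incoming frame as UTF-8 text even though some frames arrive as binary payloads (the failures always land at a fixed offset near the start of the message). A defensive handler sketch, assuming nothing about MEXC's exact binary framing beyond trying gzip as one common case:

import gzip
import json
import logging
import zlib

logger = logging.getLogger(__name__)

def parse_ws_message(raw):
    # Binary frames: try gzip, otherwise skip them instead of crashing the handler.
    if isinstance(raw, (bytes, bytearray)):
        try:
            raw = gzip.decompress(raw).decode("utf-8")
        except (OSError, zlib.error, UnicodeDecodeError):
            logger.debug("Skipping undecodable binary frame (%d bytes)", len(raw))
            return None
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        logger.debug("Skipping non-JSON frame: %.80s", raw)
        return None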
2025-03-17 00:48:26,295 - INFO - Connected to MEXC WebSocket for BTCUSDT
2025-03-17 00:48:26,295 - INFO - Subscribed to BTCUSDT tick data
2025-03-17 00:48:59,605 - WARNING - WebSocket connection closed
2025-03-17 00:48:59,606 - INFO - Cleaned up resources
2025-03-17 00:49:36,347 - INFO - Connected to MEXC WebSocket for BTCUSDT
2025-03-17 00:49:36,348 - INFO - Subscribed to BTCUSDT tick data
2025-03-17 00:50:09,797 - WARNING - WebSocket connection closed
2025-03-17 00:50:09,797 - INFO - Cleaned up resources
2025-03-17 00:50:13,164 - INFO - Connected to MEXC WebSocket for BTCUSDT
2025-03-17 00:50:13,165 - INFO - Subscribed to BTCUSDT tick data
2025-03-17 00:50:44,610 - WARNING - WebSocket connection closed
2025-03-17 00:50:44,610 - INFO - Cleaned up resources
2025-03-17 00:50:58,754 - INFO - Connected to MEXC WebSocket for BTCUSDT
2025-03-17 00:50:58,754 - INFO - Subscribed to BTCUSDT tick data
2025-03-17 00:51:30,236 - WARNING - WebSocket connection closed
2025-03-17 00:51:30,236 - INFO - Cleaned up resources
2025-03-17 00:52:24,356 - INFO - Connected to MEXC WebSocket for BTCUSDT
2025-03-17 00:52:24,356 - INFO - Subscribed to BTCUSDT tick data
2025-03-17 00:52:24,613 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.deals.v3.api@BTCUSDT]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:24,613 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:24,872 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:24,873 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:25,136 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:25,137 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:25,395 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:25,395 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:25,654 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:25,655 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:25,911 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:25,911 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:26,167 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:26,168 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:26,426 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:26,426 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:26,688 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:26,688 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:26,944 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:26,945 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:27,204 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:27,204 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:27,462 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:27,463 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:27,718 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:27,720 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:27,977 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:27,978 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:28,234 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:28,236 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:28,495 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:28,495 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:28,756 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:28,756 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:29,012 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:29,013 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:29,272 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:29,273 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:29,531 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:29,532 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:29,791 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:29,792 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:30,051 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:30,051 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:30,311 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:30,311 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:30,568 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:30,569 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:30,826 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:30,827 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:31,084 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:31,084 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:31,341 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:31,342 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:31,600 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:31,600 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:31,859 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:31,860 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:32,121 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:32,122 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:32,380 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:32,380 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:32,637 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:32,638 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:32,896 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:32,897 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:33,153 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:33,154 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:33,411 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:33,411 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:33,667 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:33,667 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:33,923 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:33,924 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:34,179 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:34,180 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:34,439 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:34,439 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:34,696 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:34,696 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:34,953 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:34,953 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:35,211 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:35,211 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:35,467 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:35,468 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:35,724 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:35,724 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:35,983 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:35,984 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:36,244 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:36,244 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:36,504 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:36,504 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:36,759 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:36,760 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:37,019 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:37,020 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:37,282 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:37,284 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:37,540 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:37,541 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:37,797 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:37,797 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:38,055 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:38,056 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:38,315 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:38,315 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:38,571 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:38,572 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:38,827 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:38,828 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:39,087 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:39,087 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:39,344 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:39,344 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:39,600 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:39,600 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:39,858 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:39,858 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:40,120 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:40,120 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:40,380 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:40,381 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:40,635 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:40,636 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:40,891 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:40,892 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:41,148 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:41,149 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:41,406 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:41,406 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:41,664 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:41,664 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:41,924 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:41,924 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:42,183 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:42,184 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:42,440 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:42,440 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:42,696 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:42,697 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:42,956 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:42,956 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:43,213 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:43,213 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:43,471 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:43,472 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:43,731 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:43,732 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:43,991 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:43,992 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:44,250 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:44,250 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:44,508 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:44,509 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:44,767 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:44,767 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:45,024 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:45,024 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:45,284 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:45,284 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:45,544 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:45,545 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:45,802 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:45,802 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:46,060 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:46,061 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:46,316 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:46,316 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:46,571 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:46,572 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:46,831 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:46,831 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:47,087 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:47,087 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:47,344 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:47,344 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:47,606 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:47,607 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:47,869 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:47,870 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:48,128 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:48,128 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:48,384 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:48,385 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:48,641 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:48,641 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:48,902 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:48,903 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:49,159 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:49,160 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:49,417 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:49,417 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:49,676 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:49,676 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:49,936 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:49,936 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:50,195 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:50,195 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:50,452 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:50,452 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:50,712 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:50,713 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:50,972 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:50,973 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:51,228 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:51,229 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:51,484 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
|
|
||||||
2025-03-17 00:52:51,484 - INFO - Subscribed to BTCUSDT kline data
|
|
||||||
2025-03-17 00:52:51,741 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
2025-03-17 00:52:51,741 - INFO - Subscribed to BTCUSDT kline data
[... the kline ERROR/INFO pair above repeats roughly every 250 ms through 00:52:52,772 ...]
2025-03-17 00:53:10,924 - INFO - Connected to MEXC WebSocket for BTCUSDT
2025-03-17 00:53:10,925 - INFO - Subscribed to BTCUSDT tick data
2025-03-17 00:53:11,182 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.deals.v3.api@BTCUSDT]. Reason: Blocked!
2025-03-17 00:53:11,183 - INFO - Subscribed to BTCUSDT kline data
2025-03-17 00:53:11,444 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
[... the kline ERROR/INFO pair repeats roughly every 250 ms until shutdown ...]
2025-03-17 00:53:19,801 - INFO - Cleaned up resources
2025-03-17 00:53:19,803 - INFO - Program interrupted by user
2025-03-17 01:05:53,831 - INFO - Connected to MEXC WebSocket for BTCUSDT
2025-03-17 01:05:53,831 - INFO - Subscribed to BTCUSDT tick data
2025-03-17 01:05:54,105 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.deals.v3.api@BTCUSDT]. Reason: Blocked!
2025-03-17 01:05:54,106 - INFO - Subscribed to BTCUSDT kline data
2025-03-17 01:05:54,364 - ERROR - Subscription blocked: Not Subscribed successfully! [spot@public.kline.v3.api@BTCUSDT@1m]. Reason: Blocked!
[... the kline ERROR/INFO pair repeats roughly every 250 ms until shutdown ...]
2025-03-17 01:06:02,047 - INFO - Cleaned up resources
2025-03-17 01:06:02,048 - INFO - Program interrupted by user
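The pattern in this log, where every resubscribe attempt is answered with "Blocked!" about four times per second, suggests the client retries immediately with no backoff. A minimal, hypothetical sketch of an exponential-backoff guard around the resubscribe call; the `resubscribe` coroutine and the `is_blocked` check are assumptions, not code from this repo:

import asyncio
import logging
import random

logger = logging.getLogger(__name__)

async def subscribe_with_backoff(resubscribe, is_blocked, max_delay=60.0):
    """Retry a WebSocket subscription with exponential backoff and jitter.

    resubscribe: async callable that sends the subscribe message and returns the ack payload.
    is_blocked:  callable that inspects the ack and returns True when the channel is blocked.
    """
    delay = 0.5
    while True:
        ack = await resubscribe()
        if not is_blocked(ack):
            logger.info("Subscription confirmed")
            return ack
        logger.warning("Subscription blocked, retrying in %.1fs", delay)
        # Jitter spreads out retries so several clients do not hammer the endpoint in lockstep.
        await asyncio.sleep(delay + random.uniform(0, delay / 2))
        delay = min(delay * 2, max_delay)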
Binary file not shown.
Binary file not shown.
Binary file not shown.
realtime.py (1773 lines changed)
File diff suppressed because it is too large
@@ -3,4 +3,6 @@ plotly>=5.18.0
 dash>=2.14.0
 pandas>=2.0.0
 numpy>=1.24.0
 python-dotenv>=1.0.0
+psutil>=5.9.0
+tensorboard>=2.15.0
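Two dependencies are added here: tensorboard backs the SummaryWriter logging used by the new training scripts later in this commit, and psutil is presumably there for process housekeeping. A minimal, hypothetical sketch of the kind of stale-process cleanup psutil enables; the port number and the assumption that stale runs hold it are mine, not from this commit:

import psutil

def kill_stale_listeners(port: int = 8050) -> int:
    """Terminate processes still listening on the given TCP port; return how many were terminated."""
    killed = 0
    for conn in psutil.net_connections(kind="inet"):
        if conn.status == psutil.CONN_LISTEN and conn.laddr and conn.laddr.port == port and conn.pid:
            try:
                psutil.Process(conn.pid).terminate()  # ask politely; use .kill() only if it ignores us
                killed += 1
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue
    return killed

if __name__ == "__main__":
    print(f"Terminated {kill_stale_listeners()} stale process(es)")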
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -35,7 +35,7 @@ logger = logging.getLogger('realtime_training')
 
 # Import the model and data interfaces
 from NN.models.cnn_model_pytorch import CNNModelPyTorch
-from NN.utils.data_interface import DataInterface
+from realtime import MultiTimeframeDataInterface
 from NN.utils.signal_interpreter import SignalInterpreter
 
 # Global variables for graceful shutdown
@@ -99,25 +99,32 @@ def run_overnight_training():
 
     try:
         # Initialize data interface
-        logger.info("Initializing data interface...")
-        data_interface = DataInterface(
+        logger.info("Initializing MultiTimeframeDataInterface...")
+        data_interface = MultiTimeframeDataInterface(
             symbol=symbol,
             timeframes=timeframes
         )
 
         # Prepare initial training data
         logger.info("Loading initial training data...")
-        X_train, y_train, X_val, y_val, train_prices, val_prices = data_interface.prepare_training_data(
-            refresh=True,
-            refresh_interval=data_refresh_interval
+        X_train_dict, y_train, X_val_dict, y_val, train_prices, val_prices = data_interface.prepare_training_data(
+            window_size=window_size,
+            refresh=True
         )
 
-        if X_train is None or y_train is None:
+        if X_train_dict is None or y_train is None:
             logger.error("Failed to load training data")
             return
 
-        logger.info(f"Training data loaded - X shape: {X_train.shape}, y shape: {y_train.shape}")
-        logger.info(f"Validation data - X shape: {X_val.shape}, y shape: {y_val.shape}")
+        # Get reference timeframe (lowest timeframe)
+        reference_tf = min(timeframes, key=lambda x: data_interface.timeframe_to_seconds.get(x, 3600))
+        logger.info(f"Using {reference_tf} as reference timeframe")
+
+        # Log data shape information
+        for tf, X in X_train_dict.items():
+            logger.info(f"Training data for {tf} - X shape: {X.shape}")
+        logger.info(f"Target labels shape: {y_train.shape}")
+        logger.info(f"Validation data for {reference_tf} - X shape: {X_val_dict[reference_tf].shape}, y shape: {y_val.shape}")
 
         # Target distribution analysis
         target_distribution = {
@@ -136,11 +143,11 @@ def run_overnight_training():
         val_future_prices = data_interface.get_future_prices(val_prices, n_candles=8)
 
         # Initialize model
-        num_features = data_interface.get_feature_count()
+        num_features = X_train_dict[reference_tf].shape[2]  # Get feature count from the data
         logger.info(f"Initializing model with {num_features} features")
 
-        # Use the same window size as the data interface
-        actual_window_size = X_train.shape[1]
+        # Use the same window size as the data
+        actual_window_size = X_train_dict[reference_tf].shape[1]
         logger.info(f"Actual window size from data: {actual_window_size}")
 
         # Try to load existing model if available
@@ -193,15 +200,15 @@ def run_overnight_training():
            # Check if we need to refresh data
            if time.time() - last_data_refresh_time > data_refresh_interval:
                logger.info("Refreshing training data...")
-                X_train, y_train, X_val, y_val, train_prices, val_prices = data_interface.prepare_training_data(
-                    refresh=True,
-                    refresh_interval=data_refresh_interval
+                X_train_dict, y_train, X_val_dict, y_val, train_prices, val_prices = data_interface.prepare_training_data(
+                    window_size=window_size,
+                    refresh=True
                )
 
-                if X_train is None or y_train is None:
+                if X_train_dict is None or y_train is None:
                    logger.warning("Failed to refresh training data. Using previous data.")
                else:
-                    logger.info(f"Refreshed training data - X shape: {X_train.shape}, y shape: {y_train.shape}")
+                    logger.info(f"Refreshed training data for {reference_tf} - X shape: {X_train_dict[reference_tf].shape}, y shape: {y_train.shape}")
 
                    # Recalculate future prices
                    train_future_prices = data_interface.get_future_prices(train_prices, n_candles=8)
@@ -209,6 +216,12 @@ def run_overnight_training():
 
                    last_data_refresh_time = time.time()
 
+            # Convert multi-timeframe dict to the format expected by the model
+            # For now, we use only the reference timeframe, but in the future,
+            # the model should be updated to handle multi-timeframe inputs
+            X_train = X_train_dict[reference_tf]
+            X_val = X_val_dict[reference_tf]
+
            # Train one epoch
            train_action_loss, train_price_loss, train_acc = model.train_epoch(
                X_train, y_train, train_future_prices, batch_size
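The diff above (and the new scripts below) repeatedly pick a "reference timeframe" with min(timeframes, key=lambda x: data_interface.timeframe_to_seconds.get(x, 3600)), i.e. the shortest timeframe wins and an unknown label counts as one hour. A small self-contained illustration of that selection rule; the timeframe_to_seconds mapping below is an assumed example, not the one defined in realtime.py:

# Assumed mapping, mirroring what MultiTimeframeDataInterface is expected to expose.
TIMEFRAME_TO_SECONDS = {"1m": 60, "5m": 300, "15m": 900, "1h": 3600, "4h": 14400, "1d": 86400}

def reference_timeframe(timeframes, mapping=TIMEFRAME_TO_SECONDS, default=3600):
    """Return the lowest (shortest) timeframe; unknown labels are treated as `default` seconds."""
    return min(timeframes, key=lambda tf: mapping.get(tf, default))

assert reference_timeframe(["1m", "5m", "15m"]) == "1m"
assert reference_timeframe(["4h", "1d"]) == "4h"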
train_config.py (new file, 231 lines)
@@ -0,0 +1,231 @@
#!/usr/bin/env python
"""
Training Configuration for GOGO2 Trading System

This module provides a central configuration for all training scripts,
ensuring they use real market data and follow consistent practices.

Usage:
    import train_config
    config = train_config.get_config('supervised')  # or 'reinforcement' or 'hybrid'
"""

import os
import copy
import logging
import json
from datetime import datetime
from pathlib import Path

# Ensure consistent logging across all training scripts
log_dir = Path("logs")
log_dir.mkdir(exist_ok=True)
log_file = log_dir / f"training_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(log_file),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger('training')

# Define available training types
TRAINING_TYPES = {
    'supervised': {
        'description': 'Supervised learning using CNN model',
        'script': 'train_with_realtime.py',
        'model_class': 'CNNModelPyTorch',
        'data_interface': 'MultiTimeframeDataInterface'
    },
    'reinforcement': {
        'description': 'Reinforcement learning using DQN agent',
        'script': 'train_rl_with_realtime.py',
        'model_class': 'DQNAgent',
        'data_interface': 'MultiTimeframeDataInterface'
    },
    'hybrid': {
        'description': 'Combined supervised and reinforcement learning',
        'script': 'train_hybrid.py',  # To be implemented
        'model_class': 'HybridModel',  # To be implemented
        'data_interface': 'MultiTimeframeDataInterface'
    }
}

# Default configuration
DEFAULT_CONFIG = {
    # Market data configuration
    'market_data': {
        'use_real_data_only': True,  # IMPORTANT: Only use real market data, never synthetic
        'symbol': 'BTC/USDT',
        'timeframes': ['1m', '5m', '15m'],
        'window_size': 24,
        'data_refresh_interval': 300,  # seconds
        'use_indicators': True
    },

    # Training parameters
    'training': {
        'max_training_time': 12 * 3600,  # seconds (12 hours)
        'checkpoint_interval': 3600,  # seconds (1 hour)
        'batch_size': 64,
        'learning_rate': 0.0001,
        'optimizer': 'adam',
        'loss_function': 'custom_pnl'  # Focus on profitability
    },

    # Model paths
    'paths': {
        'models_dir': 'NN/models/saved',
        'logs_dir': 'logs',
        'tensorboard_dir': 'runs'
    },

    # GPU configuration
    'hardware': {
        'use_gpu': True,
        'mixed_precision': True,
        'device': 'cuda' if os.environ.get('CUDA_VISIBLE_DEVICES') is not None else 'cpu'
    }
}

def get_config(training_type='supervised', custom_config=None):
    """
    Get configuration for a specific training type

    Args:
        training_type (str): Type of training ('supervised', 'reinforcement', or 'hybrid')
        custom_config (dict): Optional custom configuration to merge

    Returns:
        dict: Complete configuration
    """
    if training_type not in TRAINING_TYPES:
        raise ValueError(f"Invalid training type: {training_type}. Must be one of {list(TRAINING_TYPES.keys())}")

    # Start with default configuration (deep copy so custom overrides never mutate DEFAULT_CONFIG)
    config = copy.deepcopy(DEFAULT_CONFIG)

    # Add training type-specific configuration
    config['training_type'] = training_type
    config['training_info'] = TRAINING_TYPES[training_type]

    # Override with custom configuration if provided
    if custom_config:
        _deep_update(config, custom_config)

    # Validate configuration
    _validate_config(config)

    return config

def save_config(config, filepath=None):
    """
    Save configuration to a JSON file

    Args:
        config (dict): Configuration to save
        filepath (str): Path to save to (default: based on training type and timestamp)

    Returns:
        str: Path where configuration was saved
    """
    if filepath is None:
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        training_type = config.get('training_type', 'unknown')
        filepath = f"configs/training_{training_type}_{timestamp}.json"

    os.makedirs(os.path.dirname(filepath), exist_ok=True)

    with open(filepath, 'w') as f:
        json.dump(config, f, indent=2)

    logger.info(f"Configuration saved to {filepath}")
    return filepath

def load_config(filepath):
    """
    Load configuration from a JSON file

    Args:
        filepath (str): Path to load from

    Returns:
        dict: Loaded configuration
    """
    with open(filepath, 'r') as f:
        config = json.load(f)

    # Validate the loaded configuration
    _validate_config(config)

    logger.info(f"Configuration loaded from {filepath}")
    return config

def _deep_update(target, source):
    """
    Deep update a nested dictionary

    Args:
        target (dict): Target dictionary to update
        source (dict): Source dictionary with updates

    Returns:
        dict: Updated target dictionary
    """
    for key, value in source.items():
        if key in target and isinstance(target[key], dict) and isinstance(value, dict):
            _deep_update(target[key], value)
        else:
            target[key] = value
    return target

def _validate_config(config):
    """
    Validate configuration to ensure it follows required guidelines

    Args:
        config (dict): Configuration to validate

    Returns:
        bool: True if valid, raises exception otherwise
    """
    # Enforce real data policy (the flag lives under 'market_data')
    if config.get('market_data', {}).get('use_real_data_only', True) is not True:
        logger.error("POLICY VIOLATION: Real market data policy requires only using real data")
        raise ValueError("Configuration violates policy: Must use only real market data, never synthetic")

    # Explicit check: synthetic data must never be opted into
    if 'allow_synthetic_data' in config and config['allow_synthetic_data'] is True:
        logger.error("POLICY VIOLATION: Synthetic data is not allowed under any circumstances")
        raise ValueError("Configuration violates policy: Synthetic data is explicitly forbidden")

    # Validate symbol
    if not config['market_data']['symbol'] or '/' not in config['market_data']['symbol']:
        raise ValueError(f"Invalid symbol format: {config['market_data']['symbol']}")

    # Validate timeframes
    if not config['market_data']['timeframes']:
        raise ValueError("At least one timeframe must be specified")

    # Ensure window size is reasonable
    if config['market_data']['window_size'] < 10 or config['market_data']['window_size'] > 500:
        raise ValueError(f"Window size out of reasonable range: {config['market_data']['window_size']}")

    return True

if __name__ == "__main__":
    # Show available training configurations
    print("Available Training Configurations:")
    print("=" * 40)
    for training_type, info in TRAINING_TYPES.items():
        print(f"{training_type.upper()}: {info['description']}")

    # Example of getting and saving a configuration
    config = get_config('supervised')
    save_config(config)

    print("\nDefault configuration generated and saved.")
    print(f"Log file: {log_file}")
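A short usage sketch of the module above: overrides passed as custom_config are merged recursively by _deep_update, so only the leaves you name change, and _validate_config rejects any attempt to opt into synthetic data. The override values below are illustrative:

import train_config

# Start from the supervised defaults but train ETH on a single timeframe.
config = train_config.get_config(
    "supervised",
    custom_config={
        "market_data": {"symbol": "ETH/USDT", "timeframes": ["1m"]},
        "training": {"batch_size": 128},
    },
)
assert config["market_data"]["symbol"] == "ETH/USDT"
assert config["market_data"]["window_size"] == 24       # untouched default survives the deep merge
assert config["training"]["learning_rate"] == 0.0001    # same here

path = train_config.save_config(config)  # writes configs/training_supervised_<timestamp>.json

# Opting into synthetic data is rejected by _validate_config.
try:
    train_config.get_config("supervised", custom_config={"allow_synthetic_data": True})
except ValueError as exc:
    print(exc)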
train_dqn.py (new file, 415 lines)
@@ -0,0 +1,415 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
"""
|
||||||
|
DQN Training Session with Monitoring
|
||||||
|
|
||||||
|
This script sets up and runs a DQN agent training session with progress monitoring.
|
||||||
|
It tracks key metrics like rewards, losses, and prediction accuracy, and
|
||||||
|
visualizes the agent's learning progress.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import time
|
||||||
|
import argparse
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
import signal
|
||||||
|
from torch.utils.tensorboard import SummaryWriter
|
||||||
|
|
||||||
|
# Add project root to path if needed
|
||||||
|
project_root = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
if project_root not in sys.path:
|
||||||
|
sys.path.append(project_root)
|
||||||
|
|
||||||
|
# Import configurations
|
||||||
|
import train_config
|
||||||
|
|
||||||
|
# Import key components
|
||||||
|
from NN.models.dqn_agent import DQNAgent
|
||||||
|
from realtime import MultiTimeframeDataInterface
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
|
log_dir = Path("logs")
|
||||||
|
log_dir.mkdir(exist_ok=True)
|
||||||
|
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||||
|
log_file = log_dir / f"dqn_training_{timestamp}.log"
|
||||||
|
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
|
||||||
|
handlers=[
|
||||||
|
logging.FileHandler(log_file),
|
||||||
|
logging.StreamHandler()
|
||||||
|
]
|
||||||
|
)
|
||||||
|
logger = logging.getLogger('dqn_training')
|
||||||
|
|
||||||
|
# Global variables for graceful shutdown
|
||||||
|
running = True
|
||||||
|
|
||||||
|
# Configure signal handler for graceful shutdown
|
||||||
|
def signal_handler(sig, frame):
|
||||||
|
global running
|
||||||
|
logger.info("Received interrupt signal. Finishing current episode and saving model...")
|
||||||
|
running = False
|
||||||
|
|
||||||
|
# Register signal handler
|
||||||
|
signal.signal(signal.SIGINT, signal_handler)
|
||||||
|
|
||||||
|
class DQNTrainingMonitor:
|
||||||
|
"""
|
||||||
|
Class to monitor DQN training progress and visualize results
|
||||||
|
"""
|
||||||
|
def __init__(self, config):
|
||||||
|
self.config = config
|
||||||
|
self.device = torch.device(config['hardware']['device'])
|
||||||
|
self.agent = None
|
||||||
|
self.data_interface = None
|
||||||
|
|
||||||
|
# Training stats
|
||||||
|
self.episode_rewards = []
|
||||||
|
self.avg_rewards = []
|
||||||
|
self.losses = []
|
||||||
|
self.epsilons = []
|
||||||
|
self.best_reward = -float('inf')
|
||||||
|
self.tensorboard_writer = None
|
||||||
|
|
||||||
|
# Paths
|
||||||
|
self.models_dir = Path(config['paths']['models_dir'])
|
||||||
|
self.models_dir.mkdir(exist_ok=True, parents=True)
|
||||||
|
|
||||||
|
# Metrics display intervals
|
||||||
|
self.plot_interval = config.get('visualization', {}).get('plot_interval', 5)
|
||||||
|
self.save_interval = config.get('training', {}).get('save_interval', 10)
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
"""Initialize the DQN agent and data interface"""
|
||||||
|
# Set up TensorBoard
|
||||||
|
tb_dir = Path(self.config['paths']['tensorboard_dir'])
|
||||||
|
tb_dir.mkdir(exist_ok=True, parents=True)
|
||||||
|
log_dir = tb_dir / f"dqn_{timestamp}"
|
||||||
|
self.tensorboard_writer = SummaryWriter(log_dir=str(log_dir))
|
||||||
|
logger.info(f"TensorBoard initialized at {log_dir}")
|
||||||
|
|
||||||
|
# Initialize data interface
|
||||||
|
symbol = self.config['market_data']['symbol']
|
||||||
|
timeframes = self.config['market_data']['timeframes']
|
||||||
|
window_size = self.config['market_data']['window_size']
|
||||||
|
|
||||||
|
logger.info(f"Initializing data interface for {symbol} with timeframes {timeframes}")
|
||||||
|
self.data_interface = MultiTimeframeDataInterface(
|
||||||
|
symbol=symbol,
|
||||||
|
timeframes=timeframes
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get data for training
|
||||||
|
X_train_dict, _, _, _, _, _ = self.data_interface.prepare_training_data(
|
||||||
|
window_size=window_size,
|
||||||
|
refresh=True
|
||||||
|
)
|
||||||
|
|
||||||
|
if X_train_dict is None:
|
||||||
|
raise ValueError("Failed to load training data for DQN agent")
|
||||||
|
|
||||||
|
# Get feature count from the reference timeframe
|
||||||
|
reference_tf = min(
|
||||||
|
timeframes,
|
||||||
|
key=lambda x: self.data_interface.timeframe_to_seconds.get(x, 3600)
|
||||||
|
)
|
||||||
|
|
||||||
|
num_features = X_train_dict[reference_tf].shape[2]
|
||||||
|
logger.info(f"Using {num_features} features from timeframe {reference_tf}")
|
||||||
|
|
||||||
|
# Initialize DQN agent
|
||||||
|
state_size = num_features * window_size * len(timeframes)
|
||||||
|
action_size = 3 # Buy, Hold, Sell
|
||||||
|
|
||||||
|
logger.info(f"Initializing DQN agent with state size {state_size} and action size {action_size}")
|
||||||
|
self.agent = DQNAgent(
|
||||||
|
state_shape=(len(timeframes), window_size, num_features), # Multi-dimensional state shape
|
||||||
|
n_actions=action_size,
|
||||||
|
learning_rate=self.config['training']['learning_rate'],
|
||||||
|
batch_size=self.config['training']['batch_size'],
|
||||||
|
gamma=self.config.get('model', {}).get('gamma', 0.95),
|
||||||
|
epsilon=self.config.get('model', {}).get('epsilon_start', 1.0),
|
||||||
|
epsilon_min=self.config.get('model', {}).get('epsilon_min', 0.01),
|
||||||
|
epsilon_decay=self.config.get('model', {}).get('epsilon_decay', 0.995),
|
||||||
|
buffer_size=self.config.get('model', {}).get('memory_size', 10000),
|
||||||
|
device=self.device
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load existing model if available
|
||||||
|
model_path = self.models_dir / "dqn_agent_best"
|
||||||
|
if os.path.exists(f"{model_path}_policy.pt") and not self.config.get('model', {}).get('new_model', False):
|
||||||
|
logger.info(f"Loading existing DQN model from {model_path}")
|
||||||
|
try:
|
||||||
|
self.agent.load(str(model_path))
|
||||||
|
logger.info("DQN model loaded successfully")
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"Error loading model: {str(e)}")
|
||||||
|
logger.info("Starting with a new model instead")
|
||||||
|
else:
|
||||||
|
logger.info("Starting with a new model")
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
def train(self, num_episodes=100):
|
||||||
|
"""Train the DQN agent for a specified number of episodes"""
|
||||||
|
if self.agent is None:
|
||||||
|
raise ValueError("Agent not initialized. Call initialize() first.")
|
||||||
|
|
||||||
|
logger.info(f"Starting DQN training for {num_episodes} episodes")
|
||||||
|
|
||||||
|
# Get training data
|
||||||
|
window_size = self.config['market_data']['window_size']
|
||||||
|
X_train_dict, y_train, _, _, _, _ = self.data_interface.prepare_training_data(
|
||||||
|
window_size=window_size,
|
||||||
|
refresh=True
|
||||||
|
)
|
||||||
|
|
||||||
|
# Prepare data for training
|
||||||
|
reference_tf = min(
|
||||||
|
self.config['market_data']['timeframes'],
|
||||||
|
key=lambda x: self.data_interface.timeframe_to_seconds.get(x, 3600)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Convert data to flat states for RL
|
||||||
|
states = []
|
||||||
|
actions = []
|
||||||
|
|
||||||
|
# Find the minimum length across all timeframes to ensure consistent indexing
|
||||||
|
min_length = min(len(X_train_dict[tf]) for tf in self.config['market_data']['timeframes'])
|
||||||
|
logger.info(f"Using {min_length} samples from each timeframe for training")
|
||||||
|
|
||||||
|
# Only use indices that exist in all timeframes
|
||||||
|
for i in range(min_length):
|
||||||
|
state = []
|
||||||
|
for tf in self.config['market_data']['timeframes']:
|
||||||
|
state.extend(X_train_dict[tf][i].flatten())
|
||||||
|
states.append(np.array(state))
|
||||||
|
actions.append(np.argmax(y_train[i]))
|
||||||
|
|
||||||
|
logger.info(f"Prepared {len(states)} state-action pairs for training")
|
||||||
|
|
||||||
|
# Training loop
|
||||||
|
global running
|
||||||
|
for episode in range(1, num_episodes + 1):
|
||||||
|
if not running:
|
||||||
|
logger.info("Training interrupted. Saving final model.")
|
||||||
|
self._save_model(final=True)
|
||||||
|
break
|
||||||
|
|
||||||
|
episode_reward = 0
|
||||||
|
total_loss = 0
|
||||||
|
correct_predictions = 0
|
||||||
|
|
||||||
|
# Randomly sample start position (to prevent overfitting on sequence)
|
||||||
|
start_idx = np.random.randint(0, len(states) - 1000) if len(states) > 1000 else 0
|
||||||
|
end_idx = min(start_idx + 1000, len(states))
|
||||||
|
|
||||||
|
logger.info(f"Episode {episode}/{num_episodes} - Training on sequence from {start_idx} to {end_idx}")
|
||||||
|
|
||||||
|
# Training on sequence
|
||||||
|
for i in range(start_idx, end_idx - 1):
|
||||||
|
state = states[i]
|
||||||
|
action = actions[i]
|
||||||
|
next_state = states[i + 1]
|
||||||
|
|
||||||
|
# Get reward based on price movement
|
||||||
|
# Price is typically the close price (4th column in OHLCV data)
|
||||||
|
try:
|
||||||
|
# Assuming the last feature in each timeframe is the closing price
|
||||||
|
price_current = X_train_dict[reference_tf][i][-1, -1] # Last row, last column of current state
|
||||||
|
price_next = X_train_dict[reference_tf][i+1][-1, -1] # Last row, last column of next state
|
||||||
|
price_diff = price_next - price_current
|
||||||
|
except IndexError:
|
||||||
|
# Fallback if we're at the edge of our data
|
||||||
|
price_diff = 0
|
||||||
|
|
||||||
|
if action == 0: # Buy
|
||||||
|
reward = price_diff * 100 # Scale reward for better learning
|
||||||
|
elif action == 2: # Sell
|
||||||
|
reward = -price_diff * 100
|
||||||
|
else: # Hold
|
||||||
|
reward = abs(price_diff) * 10 if abs(price_diff) < 0.0001 else -abs(price_diff) * 50
|
||||||
|
|
||||||
|
# Train the agent with this experience
|
||||||
|
predicted_action = self.agent.act(state)
|
||||||
|
|
||||||
|
# Store experience in memory
|
||||||
|
done = (i == end_idx - 2) # Mark as done if it's the last step
|
||||||
|
self.agent.remember(state, action, reward, next_state, done)
|
||||||
|
|
||||||
|
# Periodically replay from memory
|
||||||
|
if i % 10 == 0: # Replay every 10 steps
|
||||||
|
loss = self.agent.replay()
|
||||||
|
else:
|
||||||
|
loss = None
|
||||||
|
|
||||||
|
if predicted_action == action:
|
||||||
|
correct_predictions += 1
|
||||||
|
|
||||||
|
episode_reward += reward
|
||||||
|
if loss is not None:
|
||||||
|
total_loss += loss
|
||||||
|
|
||||||
|
# Calculate metrics
|
||||||
|
accuracy = correct_predictions / (end_idx - start_idx) * 100
|
||||||
|
avg_loss = total_loss / (end_idx - start_idx) if end_idx > start_idx else 0
|
||||||
|
|
||||||
|
# Update training history
|
||||||
|
self.episode_rewards.append(episode_reward)
|
||||||
|
self.avg_rewards.append(self.agent.avg_reward)
|
||||||
|
self.losses.append(avg_loss)
|
||||||
|
self.epsilons.append(self.agent.epsilon)
|
||||||
|
|
||||||
|
# Log metrics
|
||||||
|
logger.info(f"Episode {episode} - Reward: {episode_reward:.2f}, Avg Reward: {self.agent.avg_reward:.2f}, "
|
||||||
|
f"Loss: {avg_loss:.4f}, Accuracy: {accuracy:.2f}%, Epsilon: {self.agent.epsilon:.4f}")
|
||||||
|
|
||||||
|
# Log to TensorBoard
|
||||||
|
self._log_to_tensorboard(episode, episode_reward, avg_loss, accuracy)
|
||||||
|
|
||||||
|
# Save model if improved
|
||||||
|
improved = episode_reward > self.best_reward
|
||||||
|
if improved:
|
||||||
|
self.best_reward = episode_reward
|
||||||
|
logger.info(f"New best reward: {self.best_reward:.2f}")
|
||||||
|
|
||||||
|
# Periodically save model
|
||||||
|
if episode % self.save_interval == 0 or improved:
|
||||||
|
self._save_model(final=False)
|
||||||
|
|
||||||
|
# Plot progress
|
||||||
|
if episode % self.plot_interval == 0:
|
||||||
|
self._plot_training_progress()
|
||||||
|
|
||||||
|
# Save final model
|
||||||
|
logger.info("Training completed.")
|
||||||
|
self._save_model(final=True)
|
||||||
|
|
||||||
|
def _log_to_tensorboard(self, episode, reward, loss, accuracy):
|
||||||
|
"""Log training metrics to TensorBoard"""
|
||||||
|
if self.tensorboard_writer:
|
||||||
|
self.tensorboard_writer.add_scalar('Train/Reward', reward, episode)
|
||||||
|
self.tensorboard_writer.add_scalar('Train/AvgReward', self.agent.avg_reward, episode)
|
||||||
|
self.tensorboard_writer.add_scalar('Train/Loss', loss, episode)
|
||||||
|
self.tensorboard_writer.add_scalar('Train/Accuracy', accuracy, episode)
|
||||||
|
self.tensorboard_writer.add_scalar('Train/Epsilon', self.agent.epsilon, episode)
|
||||||
|
|
||||||
|
def _save_model(self, final=False):
|
||||||
|
"""Save the DQN model"""
|
||||||
|
if final:
|
||||||
|
save_path = self.models_dir / f"dqn_agent_final_{timestamp}"
|
||||||
|
else:
|
||||||
|
save_path = self.models_dir / "dqn_agent_best"
|
||||||
|
|
||||||
|
self.agent.save(str(save_path))
|
||||||
|
logger.info(f"Model saved to {save_path}")
|
||||||
|
|
||||||
|
def _plot_training_progress(self):
|
||||||
|
"""Plot training progress metrics"""
|
||||||
|
if not self.episode_rewards:
|
||||||
|
logger.warning("No training data available for plotting yet")
|
||||||
|
return
|
||||||
|
|
||||||
|
plt.figure(figsize=(15, 10))
|
||||||
|
|
||||||
|
# Plot rewards
|
||||||
|
plt.subplot(2, 2, 1)
|
||||||
|
plt.plot(self.episode_rewards, label='Episode Reward')
|
||||||
|
plt.plot(self.avg_rewards, label='Avg Reward', linestyle='--')
|
||||||
|
plt.title('Rewards')
|
||||||
|
plt.xlabel('Episode')
|
||||||
|
plt.ylabel('Reward')
|
||||||
|
plt.legend()
|
||||||
|
|
||||||
|
# Plot losses
|
||||||
|
plt.subplot(2, 2, 2)
|
||||||
|
plt.plot(self.losses)
|
||||||
|
plt.title('Loss')
|
||||||
|
plt.xlabel('Episode')
|
||||||
|
plt.ylabel('Loss')
|
||||||
|
|
||||||
|
# Plot epsilon
|
||||||
|
plt.subplot(2, 2, 3)
|
||||||
|
plt.plot(self.epsilons)
|
||||||
|
plt.title('Exploration Rate (Epsilon)')
|
||||||
|
plt.xlabel('Episode')
|
||||||
|
plt.ylabel('Epsilon')
|
||||||
|
|
||||||
|
# Save plot
|
||||||
|
plots_dir = Path("plots")
|
||||||
|
plots_dir.mkdir(exist_ok=True)
|
||||||
|
plt.tight_layout()
|
||||||
|
plt.savefig(plots_dir / f"dqn_training_progress_{timestamp}.png")
|
||||||
|
plt.close()
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser(description='DQN Training Session with Monitoring')
|
||||||
|
parser.add_argument('--episodes', type=int, default=100, help='Number of episodes to train')
|
||||||
|
parser.add_argument('--symbol', type=str, default='BTC/USDT', help='Trading symbol')
|
||||||
|
parser.add_argument('--timeframes', type=str, default='1m,5m,15m', help='Comma-separated timeframes')
|
||||||
|
parser.add_argument('--window', type=int, default=24, help='Window size for state construction')
|
||||||
|
parser.add_argument('--batch-size', type=int, default=64, help='Batch size for training')
|
||||||
|
parser.add_argument('--lr', type=float, default=0.0001, help='Learning rate')
|
||||||
|
parser.add_argument('--plot-interval', type=int, default=5, help='Interval for plotting progress')
|
||||||
|
parser.add_argument('--save-interval', type=int, default=10, help='Interval for saving model')
|
||||||
|
parser.add_argument('--new-model', action='store_true', help='Start with a new model instead of loading existing')
|
||||||
|
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
def main():
|
||||||
|
args = parse_args()
|
||||||
|
|
||||||
|
# Force CPU training to avoid device mismatch errors
|
||||||
|
os.environ['CUDA_VISIBLE_DEVICES'] = ''
|
||||||
|
os.environ['DISABLE_MIXED_PRECISION'] = '1'
|
||||||
|
|
||||||
|
# Create custom config based on arguments
|
||||||
|
custom_config = {
|
||||||
|
'market_data': {
|
||||||
|
'symbol': args.symbol,
|
||||||
|
'timeframes': args.timeframes.split(','),
|
||||||
|
'window_size': args.window
|
||||||
|
},
|
||||||
|
'training': {
|
||||||
|
'batch_size': args.batch_size,
|
||||||
|
'learning_rate': args.lr,
|
||||||
|
'save_interval': args.save_interval
|
||||||
|
},
|
||||||
|
'visualization': {
|
||||||
|
'plot_interval': args.plot_interval
|
||||||
|
},
|
||||||
|
'model': {
|
||||||
|
'new_model': args.new_model
|
||||||
|
},
|
||||||
|
'hardware': {
|
||||||
|
'device': 'cpu',
|
||||||
|
'mixed_precision': False
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Get configuration
|
||||||
|
config = train_config.get_config('reinforcement', custom_config)
|
||||||
|
|
||||||
|
# Save configuration for reference
|
||||||
|
config_dir = Path("configs")
|
||||||
|
config_dir.mkdir(exist_ok=True)
|
||||||
|
config_path = config_dir / f"dqn_training_config_{timestamp}.json"
|
||||||
|
train_config.save_config(config, str(config_path))
|
||||||
|
|
||||||
|
# Initialize and train
|
||||||
|
monitor = DQNTrainingMonitor(config)
|
||||||
|
monitor.initialize()
|
||||||
|
monitor.train(num_episodes=args.episodes)
|
||||||
|
|
||||||
|
logger.info(f"Training completed. Results saved to logs and plots directories.")
|
||||||
|
logger.info(f"To visualize training in TensorBoard, run: tensorboard --logdir={config['paths']['tensorboard_dir']}")
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
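The episode loop in train_dqn.py above derives rewards purely from the price change between consecutive reference-timeframe states: buys are paid the (scaled) price move, sells the inverse, and holds earn a small bonus only when the market is essentially flat. A distilled sketch of that shaping rule; the constants are copied from the loop, the function name and example values are mine:

def shaped_reward(action: int, price_diff: float, flat_threshold: float = 1e-4) -> float:
    """Reward shaping mirroring the DQN training loop: 0 = buy, 1 = hold, 2 = sell."""
    if action == 0:   # buy profits when the price rises
        return price_diff * 100
    if action == 2:   # sell profits when the price falls
        return -price_diff * 100
    # hold: small positive reward in a flat market, penalised when a move was missed
    return abs(price_diff) * 10 if abs(price_diff) < flat_threshold else -abs(price_diff) * 50

# Example: a +0.002 move rewards a buy, penalises a sell, and punishes holding through it.
print(shaped_reward(0, 0.002), shaped_reward(2, 0.002), shaped_reward(1, 0.002))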
train_hybrid.py (new file, 731 lines)
@@ -0,0 +1,731 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
"""
|
||||||
|
Hybrid Training Script - Combining Supervised and Reinforcement Learning
|
||||||
|
|
||||||
|
This script provides a hybrid approach that:
|
||||||
|
1. Performs supervised learning on market data using CNN models
|
||||||
|
2. Uses reinforcement learning to optimize trading strategies
|
||||||
|
3. Only uses real market data (never synthetic)
|
||||||
|
|
||||||
|
The script enables both approaches to complement each other:
|
||||||
|
- CNN model learns patterns from historical data (supervised)
|
||||||
|
- RL agent optimizes actual trading decisions (reinforcement)
|
||||||
|
"""
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import argparse
|
||||||
|
import numpy as np
|
||||||
|
import torch
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
import asyncio
|
||||||
|
import signal
|
||||||
|
import threading
|
||||||
|
from datetime import datetime
|
||||||
|
from pathlib import Path
|
||||||
|
import matplotlib.pyplot as plt
|
||||||
|
from torch.utils.tensorboard import SummaryWriter
|
||||||
|
|
||||||
|
# Add project root to path if needed
|
||||||
|
project_root = os.path.dirname(os.path.abspath(__file__))
|
||||||
|
if project_root not in sys.path:
|
||||||
|
sys.path.append(project_root)
|
||||||
|
|
||||||
|
# Import configurations
|
||||||
|
import train_config
|
||||||
|
|
||||||
|
# Import key components
|
||||||
|
from NN.models.cnn_model_pytorch import CNNModelPyTorch
|
||||||
|
from NN.models.dqn_agent import DQNAgent
|
||||||
|
from realtime import MultiTimeframeDataInterface, RealTimeChart
|
||||||
|
from NN.utils.signal_interpreter import SignalInterpreter
|
||||||
|
|
||||||
|
# Global variables for graceful shutdown
|
||||||
|
running = True
|
||||||
|
training_stats = {
|
||||||
|
"supervised": {
|
||||||
|
"epochs_completed": 0,
|
||||||
|
"best_val_pnl": -float('inf'),
|
||||||
|
"best_epoch": 0,
|
||||||
|
"best_win_rate": 0
|
||||||
|
},
|
||||||
|
"reinforcement": {
|
||||||
|
"episodes_completed": 0,
|
||||||
|
"best_reward": -float('inf'),
|
||||||
|
"best_episode": 0,
|
||||||
|
"best_win_rate": 0
|
||||||
|
},
|
||||||
|
"hybrid": {
|
||||||
|
"iterations_completed": 0,
|
||||||
|
"best_combined_score": -float('inf'),
|
||||||
|
"training_started": datetime.now().isoformat(),
|
||||||
|
"last_update": datetime.now().isoformat()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
# Configure signal handler for graceful shutdown
|
||||||
|
def signal_handler(sig, frame):
|
||||||
|
global running
|
||||||
|
logging.info("Received interrupt signal. Finishing current training cycle and saving models...")
|
||||||
|
running = False
|
||||||
|
|
||||||
|
# Register signal handler
|
||||||
|
signal.signal(signal.SIGINT, signal_handler)
|
||||||
|
|
||||||
|
class HybridModel:
|
||||||
|
"""
|
||||||
|
Hybrid model that combines supervised CNN learning with RL-based decision optimization
|
||||||
|
"""
|
||||||
|
def __init__(self, config):
|
||||||
|
self.config = config
|
||||||
|
self.device = torch.device(config['hardware']['device'])
|
||||||
|
self.supervised_model = None
|
||||||
|
self.rl_agent = None
|
||||||
|
self.data_interface = None
|
||||||
|
self.signal_interpreter = None
|
||||||
|
self.chart = None
|
||||||
|
|
||||||
|
# Training stats
|
||||||
|
self.tensorboard_writer = None
|
||||||
|
self.iter_count = 0
|
||||||
|
self.supervised_epochs = 0
|
||||||
|
self.rl_episodes = 0
|
||||||
|
|
||||||
|
# Initialize logging
|
||||||
|
self.logger = logging.getLogger('hybrid_model')
|
||||||
|
|
||||||
|
# Paths
|
||||||
|
self.models_dir = Path(config['paths']['models_dir'])
|
||||||
|
self.models_dir.mkdir(exist_ok=True, parents=True)
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
"""Initialize all components of the hybrid model"""
|
||||||
|
# Set up TensorBoard
|
||||||
|
log_dir = Path(self.config['paths']['tensorboard_dir']) / f"hybrid_{int(time.time())}"
|
||||||
|
self.tensorboard_writer = SummaryWriter(log_dir=str(log_dir))
|
||||||
|
self.logger.info(f"TensorBoard initialized at {log_dir}")
|
||||||
|
|
||||||
|
# Initialize data interface
|
||||||
|
symbol = self.config['market_data']['symbol']
|
||||||
|
timeframes = self.config['market_data']['timeframes']
|
||||||
|
window_size = self.config['market_data']['window_size']
|
||||||
|
|
||||||
|
self.logger.info(f"Initializing data interface for {symbol} with timeframes {timeframes}")
|
||||||
|
self.data_interface = MultiTimeframeDataInterface(
|
||||||
|
symbol=symbol,
|
||||||
|
timeframes=timeframes
|
||||||
|
)
|
||||||
|
|
||||||
|
# Initialize supervised model (CNN)
|
||||||
|
self._initialize_supervised_model(window_size)
|
||||||
|
|
||||||
|
# Initialize RL agent
|
||||||
|
self._initialize_rl_agent(window_size)
|
||||||
|
|
||||||
|
# Initialize signal interpreter
|
||||||
|
self.signal_interpreter = SignalInterpreter(config={
|
||||||
|
'buy_threshold': 0.65,
|
||||||
|
'sell_threshold': 0.65,
|
||||||
|
'hold_threshold': 0.75,
|
||||||
|
'trend_filter_enabled': True,
|
||||||
|
'volume_filter_enabled': True
|
||||||
|
})
|
||||||
|
|
||||||
|
# Initialize chart if visualization is enabled
|
||||||
|
if self.config.get('visualization', {}).get('enabled', False):
|
||||||
|
self._initialize_chart()
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
def _initialize_supervised_model(self, window_size):
|
||||||
|
"""Initialize the supervised CNN model"""
|
||||||
|
try:
|
||||||
|
# Get data shape information
|
||||||
|
X_train_dict, y_train, X_val_dict, y_val, _, _ = self.data_interface.prepare_training_data(
|
||||||
|
window_size=window_size,
|
||||||
|
refresh=True
|
||||||
|
)
|
||||||
|
|
||||||
|
if X_train_dict is None or y_train is None:
|
||||||
|
raise ValueError("Failed to load training data")
|
||||||
|
|
||||||
|
# Get reference timeframe (lowest timeframe)
|
||||||
|
reference_tf = min(
|
||||||
|
self.config['market_data']['timeframes'],
|
||||||
|
key=lambda x: self.data_interface.timeframe_to_seconds.get(x, 3600)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Get feature count from the data
|
||||||
|
num_features = X_train_dict[reference_tf].shape[2]
|
||||||
|
|
||||||
|
# Initialize model
|
||||||
|
self.logger.info(f"Initializing CNN model with {num_features} features")
|
||||||
|
|
||||||
|
self.supervised_model = CNNModelPyTorch(
|
||||||
|
window_size=window_size,
|
||||||
|
num_features=num_features,
|
||||||
|
output_size=3, # BUY/HOLD/SELL
|
||||||
|
timeframes=self.config['market_data']['timeframes']
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load existing model if available
|
||||||
|
model_path = self.models_dir / "supervised_model_best.pt"
|
||||||
|
if model_path.exists():
|
||||||
|
self.logger.info(f"Loading existing CNN model from {model_path}")
|
||||||
|
self.supervised_model.load(str(model_path))
|
||||||
|
self.logger.info("CNN model loaded successfully")
|
||||||
|
else:
|
||||||
|
self.logger.info("No existing CNN model found. Starting with a new model.")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error(f"Error initializing supervised model: {str(e)}")
|
||||||
|
import traceback
|
||||||
|
self.logger.error(traceback.format_exc())
|
||||||
|
raise
|
||||||
|
|
||||||
|
def _initialize_rl_agent(self, window_size):
|
||||||
|
"""Initialize the RL agent"""
|
||||||
|
try:
|
||||||
|
# Get data for RL training
|
||||||
|
X_train_dict, _, _, _, _, _ = self.data_interface.prepare_training_data(
|
||||||
|
window_size=window_size,
|
||||||
|
refresh=True
|
||||||
|
)
|
||||||
|
|
||||||
|
if X_train_dict is None:
|
||||||
|
raise ValueError("Failed to load training data for RL agent")
|
||||||
|
|
||||||
|
# Get reference timeframe features
|
||||||
|
reference_tf = min(
|
||||||
|
self.config['market_data']['timeframes'],
|
||||||
|
key=lambda x: self.data_interface.timeframe_to_seconds.get(x, 3600)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Calculate state size - this is more complex for RL
|
||||||
|
# For simplicity, we'll use the CNN's feature representation + position info
|
||||||
|
state_size = window_size * X_train_dict[reference_tf].shape[2] + 3 # +3 for position, equity, unrealized_pnl
|
||||||
|
|
||||||
|
# Initialize RL agent
|
||||||
|
self.logger.info(f"Initializing RL agent with state size {state_size}")
|
||||||
|
|
||||||
|
self.rl_agent = DQNAgent(
|
||||||
|
state_size=state_size,
|
||||||
|
n_actions=3, # BUY/HOLD/SELL
|
||||||
|
epsilon=1.0,
|
||||||
|
epsilon_decay=0.995,
|
||||||
|
epsilon_min=0.01,
|
||||||
|
learning_rate=self.config['training']['learning_rate'],
|
||||||
|
gamma=0.99,
|
||||||
|
buffer_size=10000,
|
||||||
|
batch_size=self.config['training']['batch_size'],
|
||||||
|
device=self.device
|
||||||
|
)
|
||||||
|
|
||||||
|
# Load existing agent if available
|
||||||
|
agent_path = self.models_dir / "rl_agent_best.pth"
|
||||||
|
if agent_path.exists():
|
||||||
|
self.logger.info(f"Loading existing RL agent from {agent_path}")
|
||||||
|
self.rl_agent.load(str(agent_path))
|
||||||
|
self.logger.info("RL agent loaded successfully")
|
||||||
|
else:
|
||||||
|
self.logger.info("No existing RL agent found. Starting with a new agent.")
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error(f"Error initializing RL agent: {str(e)}")
|
||||||
|
import traceback
|
||||||
|
self.logger.error(traceback.format_exc())
|
||||||
|
raise
|
||||||
|
|
||||||
|
def _initialize_chart(self):
|
||||||
|
"""Initialize the RealTimeChart for visualization"""
|
||||||
|
try:
|
||||||
|
from realtime import RealTimeChart
|
||||||
|
|
||||||
|
symbol = self.config['market_data']['symbol']
|
||||||
|
self.logger.info(f"Initializing RealTimeChart for {symbol}")
|
||||||
|
|
||||||
|
self.chart = RealTimeChart(symbol=symbol)
|
||||||
|
|
||||||
|
# TODO: Start chart server in a background thread
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
self.logger.error(f"Error initializing chart: {str(e)}")
|
||||||
|
self.chart = None
|
||||||
|
|
    async def train_hybrid(self, iterations=10, sv_epochs_per_iter=5, rl_episodes_per_iter=2):
        """
        Main hybrid training loop

        Args:
            iterations: Number of hybrid iterations to run
            sv_epochs_per_iter: Number of supervised epochs per iteration
            rl_episodes_per_iter: Number of RL episodes per iteration

        Returns:
            dict: Training statistics
        """
        self.logger.info(f"Starting hybrid training with {iterations} iterations")
        self.logger.info(f"Each iteration includes {sv_epochs_per_iter} supervised epochs and {rl_episodes_per_iter} RL episodes")

        # Training loop
        for iteration in range(iterations):
            if not running:
                self.logger.info("Training stopped by user")
                break

            self.logger.info(f"Iteration {iteration+1}/{iterations}")
            self.iter_count += 1

            # 1. Supervised learning phase
            self.logger.info("Starting supervised learning phase")
            sv_stats = await self.train_supervised(epochs=sv_epochs_per_iter)

            # 2. Reinforcement learning phase
            self.logger.info("Starting reinforcement learning phase")
            rl_stats = await self.train_reinforcement(episodes=rl_episodes_per_iter)

            # 3. Update global training stats
            self._update_training_stats(sv_stats, rl_stats)

            # 4. Save models and stats
            self._save_models_and_stats()

            # 5. Log to TensorBoard
            if self.tensorboard_writer:
                self._log_to_tensorboard(iteration, sv_stats, rl_stats)

        self.logger.info("Hybrid training completed")
        return training_stats
    async def train_supervised(self, epochs=5):
        """
        Run supervised training for a specified number of epochs

        Args:
            epochs: Number of epochs to train

        Returns:
            dict: Training statistics
        """
        # Get fresh data
        window_size = self.config['market_data']['window_size']
        X_train_dict, y_train, X_val_dict, y_val, train_prices, val_prices = self.data_interface.prepare_training_data(
            window_size=window_size,
            refresh=True
        )

        if X_train_dict is None or y_train is None:
            self.logger.error("Failed to load training data")
            return {}

        # Get reference timeframe (lowest timeframe)
        reference_tf = min(
            self.config['market_data']['timeframes'],
            key=lambda x: self.data_interface.timeframe_to_seconds.get(x, 3600)
        )

        # Calculate future prices for profitability-focused loss function
        train_future_prices = self.data_interface.get_future_prices(train_prices, n_candles=8)
        val_future_prices = self.data_interface.get_future_prices(val_prices, n_candles=8)

        # For now, we use only the reference timeframe
        X_train = X_train_dict[reference_tf]
        X_val = X_val_dict[reference_tf]

        # Training stats
        stats = {
            "train_losses": [],
            "val_losses": [],
            "train_accuracies": [],
            "val_accuracies": [],
            "train_pnls": [],
            "val_pnls": [],
            "best_val_pnl": -float('inf'),
            "best_epoch": -1
        }

        batch_size = self.config['training']['batch_size']

        # Training loop
        for epoch in range(epochs):
            if not running:
                break

            epoch_start = time.time()

            # Train one epoch
            train_action_loss, train_price_loss, train_acc = self.supervised_model.train_epoch(
                X_train, y_train, train_future_prices, batch_size
            )

            # Evaluate
            val_action_loss, val_price_loss, val_acc = self.supervised_model.evaluate(
                X_val, y_val, val_future_prices
            )

            # Get predictions for PnL calculation
            train_action_probs, _ = self.supervised_model.predict(X_train)
            val_action_probs, _ = self.supervised_model.predict(X_val)

            # Convert probabilities to actions
            train_preds = np.argmax(train_action_probs, axis=1)
            val_preds = np.argmax(val_action_probs, axis=1)

            # Calculate PnL
            train_pnl, train_win_rate, _ = self.data_interface.calculate_pnl(
                train_preds, train_prices, position_size=1.0
            )
            val_pnl, val_win_rate, _ = self.data_interface.calculate_pnl(
                val_preds, val_prices, position_size=1.0
            )

            # Update stats
            stats["train_losses"].append(train_action_loss)
            stats["val_losses"].append(val_action_loss)
            stats["train_accuracies"].append(train_acc)
            stats["val_accuracies"].append(val_acc)
            stats["train_pnls"].append(train_pnl)
            stats["val_pnls"].append(val_pnl)

            # Check if this is the best model
            if val_pnl > stats["best_val_pnl"]:
                stats["best_val_pnl"] = val_pnl
                stats["best_epoch"] = epoch
                stats["best_win_rate"] = val_win_rate

                # Save the best model
                self.supervised_model.save(str(self.models_dir / "supervised_model_best.pt"))

            # Log epoch results
            self.logger.info(f"Supervised Epoch {epoch+1}/{epochs}")
            self.logger.info(f"  Train Loss: {train_action_loss:.4f}, Accuracy: {train_acc:.4f}, PnL: {train_pnl:.4f}")
            self.logger.info(f"  Val Loss: {val_action_loss:.4f}, Accuracy: {val_acc:.4f}, PnL: {val_pnl:.4f}")

            # Log timing
            epoch_time = time.time() - epoch_start
            self.logger.info(f"  Epoch completed in {epoch_time:.2f} seconds")

            # Update global epoch counter
            self.supervised_epochs += 1

            # Small delay to allow for interruption
            await asyncio.sleep(0.1)

        return stats
    async def train_reinforcement(self, episodes=2):
        """
        Run reinforcement learning for a specified number of episodes

        Args:
            episodes: Number of episodes to train

        Returns:
            dict: Training statistics
        """
        from NN.train_rl import RLTradingEnvironment

        # Get data for RL environment
        window_size = self.config['market_data']['window_size']

        # Get all timeframes data
        data_dict = self.data_interface.get_multi_timeframe_data(refresh=True)

        if not data_dict:
            self.logger.error("Failed to fetch data for any timeframe")
            return {}

        # Extract key timeframes
        timeframes = self.config['market_data']['timeframes']

        # Extract features from dataframes
        features = {}
        for tf in timeframes:
            if tf in data_dict:
                df = data_dict[tf]
                # Add indicators if not already added
                if 'rsi' not in df.columns:
                    df = self.data_interface.add_indicators(df)

                # Convert to numpy array with close price as the last column
                features[tf] = np.hstack([
                    df.drop(['timestamp', 'close'], axis=1).values,
                    df['close'].values.reshape(-1, 1)
                ])

        # Ensure we have all needed timeframes
        required_tfs = ['1m', '5m', '15m']  # Most common timeframes used by RL
        for tf in required_tfs:
            if tf not in features and tf in timeframes:
                self.logger.error(f"Missing features for timeframe {tf}")
                return {}

        # Create environment with our feature data
        env = RLTradingEnvironment(
            features_1m=features.get('1m'),
            features_1h=features.get('1h', features.get('5m')),  # Use 5m as fallback
            features_1d=features.get('1d', features.get('15m'))  # Use 15m as fallback
        )

        # Training stats
        stats = {
            "rewards": [],
            "win_rates": [],
            "trades": [],
            "best_reward": -float('inf'),
            "best_episode": -1
        }

        # RL training loop
        for episode in range(episodes):
            if not running:
                break

            episode_start = time.time()
            self.logger.info(f"RL Episode {episode+1}/{episodes}")

            # Reset environment
            state = env.reset()
            total_reward = 0
            trades = 0
            wins = 0

            # Run one episode
            done = False
            max_steps = 1000
            step = 0

            while not done and step < max_steps:
                # Use CNN model to enhance state representation if available
                enhanced_state = self._enhance_state_with_cnn(state)

                # Select action using the RL agent
                action = self.rl_agent.act(enhanced_state)

                # Take step in environment
                next_state, reward, done, info = env.step(action)

                # Store in replay buffer
                self.rl_agent.remember(enhanced_state, action, reward,
                                       self._enhance_state_with_cnn(next_state), done)

                # Update episode statistics
                total_reward += reward
                state = next_state
                step += 1

                # Track trades and wins
                if action != 2:  # Not HOLD
                    trades += 1
                    if reward > 0:
                        wins += 1

                # Train the agent on a batch of experiences
                if len(self.rl_agent.memory) > self.config['training']['batch_size']:
                    self.rl_agent.replay(self.config['training']['batch_size'])

                # Allow for interruption
                if step % 100 == 0:
                    await asyncio.sleep(0.1)
                    if not running:
                        break

            # Calculate win rate
            win_rate = wins / max(1, trades)

            # Update stats
            stats["rewards"].append(total_reward)
            stats["win_rates"].append(win_rate)
            stats["trades"].append(trades)

            # Check if this is the best agent
            if total_reward > stats["best_reward"]:
                stats["best_reward"] = total_reward
                stats["best_episode"] = episode

                # Save the best agent
                self.rl_agent.save(str(self.models_dir / "rl_agent_best.pth"))

            # Log episode results
            self.logger.info(f"  Reward: {total_reward:.4f}, Win Rate: {win_rate:.4f}, Trades: {trades}")

            # Log timing
            episode_time = time.time() - episode_start
            self.logger.info(f"  Episode completed in {episode_time:.2f} seconds")

            # Update global episode counter
            self.rl_episodes += 1

            # Reduce exploration rate
            self.rl_agent.adjust_epsilon()

            # Small delay to allow for interruption
            await asyncio.sleep(0.1)

        return stats
    def _enhance_state_with_cnn(self, state):
        """
        Enhance the RL state with CNN feature extraction

        Args:
            state: The original state from the environment

        Returns:
            numpy.ndarray: Enhanced state representation
        """
        # This is a placeholder - in a real implementation, you would:
        # 1. Format the state for the CNN
        # 2. Get the CNN's feature representation
        # 3. Combine with the original state features
        return state
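A hedged sketch of what this placeholder could grow into, assuming the CNN is the `supervised_model` used elsewhere in this class (whose `predict` returns action probabilities) and that the DQN agent's `state_size` is enlarged to match the extra features; the helper name and the `window` argument are illustrative only, not part of the commit:

```python
import numpy as np

def enhance_state_with_cnn_sketch(state, supervised_model, window):
    """Illustrative only: append CNN action probabilities to the raw RL state."""
    # 1. Format the state for the CNN: a (1, window_size, n_features) batch
    cnn_input = np.expand_dims(np.asarray(window, dtype=np.float32), axis=0)

    # 2. Get the CNN's feature representation (here: its action probabilities)
    action_probs, _ = supervised_model.predict(cnn_input)

    # 3. Combine with the original state features
    return np.concatenate([
        np.asarray(state, dtype=np.float32).ravel(),
        np.asarray(action_probs, dtype=np.float32).ravel(),
    ])
```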
    def _update_training_stats(self, sv_stats, rl_stats):
        """Update global training statistics"""
        global training_stats

        # Update supervised stats
        if sv_stats:
            training_stats["supervised"]["epochs_completed"] = self.supervised_epochs
            if "best_val_pnl" in sv_stats and sv_stats["best_val_pnl"] > training_stats["supervised"]["best_val_pnl"]:
                training_stats["supervised"]["best_val_pnl"] = sv_stats["best_val_pnl"]
                training_stats["supervised"]["best_epoch"] = sv_stats["best_epoch"] + training_stats["supervised"]["epochs_completed"] - len(sv_stats["train_losses"])
                training_stats["supervised"]["best_win_rate"] = sv_stats.get("best_win_rate", 0)

        # Update reinforcement stats
        if rl_stats:
            training_stats["reinforcement"]["episodes_completed"] = self.rl_episodes
            if "best_reward" in rl_stats and rl_stats["best_reward"] > training_stats["reinforcement"]["best_reward"]:
                training_stats["reinforcement"]["best_reward"] = rl_stats["best_reward"]
                training_stats["reinforcement"]["best_episode"] = rl_stats["best_episode"] + training_stats["reinforcement"]["episodes_completed"] - len(rl_stats["rewards"])

        # Update hybrid stats
        training_stats["hybrid"]["iterations_completed"] = self.iter_count
        training_stats["hybrid"]["last_update"] = datetime.now().isoformat()

        # Calculate combined score (simple formula, can be adjusted)
        sv_score = training_stats["supervised"]["best_val_pnl"]
        rl_score = training_stats["reinforcement"]["best_reward"]
        combined_score = sv_score * 0.7 + rl_score * 0.3  # Weight supervised more

        if combined_score > training_stats["hybrid"]["best_combined_score"]:
            training_stats["hybrid"]["best_combined_score"] = combined_score
    def _save_models_and_stats(self):
        """Save models and training statistics"""
        # Save training stats
        try:
            stats_file = self.models_dir / "hybrid_training_stats.json"
            with open(stats_file, 'w') as f:
                json.dump(training_stats, f, indent=2)
            self.logger.info(f"Training statistics saved to {stats_file}")
        except Exception as e:
            self.logger.error(f"Error saving training stats: {str(e)}")

        # Models are already saved in their respective training functions
    def _log_to_tensorboard(self, iteration, sv_stats, rl_stats):
        """Log training metrics to TensorBoard"""
        if not self.tensorboard_writer:
            return

        # Log supervised metrics
        if sv_stats and "train_losses" in sv_stats:
            for i, loss in enumerate(sv_stats["train_losses"]):
                step = (iteration * len(sv_stats["train_losses"])) + i
                self.tensorboard_writer.add_scalar('supervised/train_loss', loss, step)
                self.tensorboard_writer.add_scalar('supervised/val_loss', sv_stats["val_losses"][i], step)
                self.tensorboard_writer.add_scalar('supervised/train_accuracy', sv_stats["train_accuracies"][i], step)
                self.tensorboard_writer.add_scalar('supervised/val_accuracy', sv_stats["val_accuracies"][i], step)
                self.tensorboard_writer.add_scalar('supervised/train_pnl', sv_stats["train_pnls"][i], step)
                self.tensorboard_writer.add_scalar('supervised/val_pnl', sv_stats["val_pnls"][i], step)

        # Log reinforcement metrics
        if rl_stats and "rewards" in rl_stats:
            for i, reward in enumerate(rl_stats["rewards"]):
                step = (iteration * len(rl_stats["rewards"])) + i
                self.tensorboard_writer.add_scalar('reinforcement/reward', reward, step)
                self.tensorboard_writer.add_scalar('reinforcement/win_rate', rl_stats["win_rates"][i], step)
                self.tensorboard_writer.add_scalar('reinforcement/trades', rl_stats["trades"][i], step)

        # Log hybrid metrics
        self.tensorboard_writer.add_scalar('hybrid/iterations', self.iter_count, iteration)
        self.tensorboard_writer.add_scalar('hybrid/combined_score', training_stats["hybrid"]["best_combined_score"], iteration)

        # Flush to ensure data is written
        self.tensorboard_writer.flush()
async def main():
    """Main entry point for the hybrid training script"""
    parser = argparse.ArgumentParser(description='Hybrid Training Script')
    parser.add_argument('--iterations', type=int, default=10, help='Number of hybrid iterations to run')
    parser.add_argument('--sv-epochs', type=int, default=5, help='Supervised epochs per iteration')
    parser.add_argument('--rl-episodes', type=int, default=2, help='RL episodes per iteration')
    parser.add_argument('--symbol', type=str, default='BTC/USDT', help='Trading symbol')
    parser.add_argument('--timeframes', type=str, nargs='+', default=['1m', '5m', '15m'], help='Timeframes to use')
    parser.add_argument('--window-size', type=int, default=24, help='Window size for models')
    parser.add_argument('--visualize', action='store_true', help='Enable visualization')
    parser.add_argument('--config', type=str, help='Path to custom configuration file')

    args = parser.parse_args()

    # Load configuration
    if args.config:
        config = train_config.load_config(args.config)
    else:
        # Create custom config from command-line arguments
        custom_config = {
            'market_data': {
                'symbol': args.symbol,
                'timeframes': args.timeframes,
                'window_size': args.window_size
            },
            'visualization': {
                'enabled': args.visualize
            }
        }
        config = train_config.get_config('hybrid', custom_config)

    # Print startup banner
    print("=" * 80)
    print("HYBRID TRAINING SESSION")
    print("Combining supervised learning (CNN) with reinforcement learning (RL)")
    print(f"Symbol: {config['market_data']['symbol']}")
    print(f"Timeframes: {config['market_data']['timeframes']}")
    print(f"Iterations: {args.iterations} (SV epochs: {args.sv_epochs}, RL episodes: {args.rl_episodes})")
    print("Press Ctrl+C to safely stop training and save the models")
    print("=" * 80)

    # Initialize the hybrid model
    hybrid_model = HybridModel(config)
    initialized = hybrid_model.initialize()

    if not initialized:
        print("Failed to initialize hybrid model. Exiting.")
        return 1

    try:
        # Run training
        await hybrid_model.train_hybrid(
            iterations=args.iterations,
            sv_epochs_per_iter=args.sv_epochs,
            rl_episodes_per_iter=args.rl_episodes
        )

        print("Training completed successfully.")
        return 0

    except KeyboardInterrupt:
        print("Training interrupted by user.")
        return 0

    except Exception as e:
        print(f"Error during training: {str(e)}")
        import traceback
        traceback.print_exc()
        return 1

if __name__ == "__main__":
    asyncio.run(main())
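For reference, the same flow can be driven without the CLI. This is a minimal sketch under the assumption that the script above is importable as a module exposing `HybridModel` and `train_config` exactly as used in `main()`; the module name `train_hybrid` is hypothetical:

```python
import asyncio

from train_hybrid import HybridModel, train_config  # hypothetical module name

async def run_once():
    # Build a config the same way main() does, for a small smoke run
    config = train_config.get_config('hybrid', {
        'market_data': {'symbol': 'BTC/USDT', 'timeframes': ['1m', '5m', '15m'], 'window_size': 24},
        'visualization': {'enabled': False},
    })
    model = HybridModel(config)
    if model.initialize():
        # One iteration: 2 supervised epochs followed by 1 RL episode
        await model.train_hybrid(iterations=1, sv_epochs_per_iter=2, rl_episodes_per_iter=1)

asyncio.run(run_once())
```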
@@ -39,6 +39,14 @@ project_root = os.path.dirname(os.path.abspath(__file__))
 if project_root not in sys.path:
     sys.path.append(project_root)
 
+# Global variable to store agent instance
+_agent_instance = None
+
+def get_agent():
+    """Return the current agent instance for external use"""
+    global _agent_instance
+    return _agent_instance
+
 # Set up GPU/CUDA if available
 def setup_gpu():
     """
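The point of `_agent_instance` / `get_agent()` is to let other code grab the trained agent after a run has started; a hedged sketch of that pattern, where the importing module name is hypothetical:

```python
# Illustrative only: fetch the live agent from another module once training is running
import train_rl_with_realtime as rl_train  # hypothetical module name for the script above

agent = rl_train.get_agent()
if agent is not None:
    # The trained policy can now be reused elsewhere (e.g. for inference or evaluation)
    print("Got a trained agent:", type(agent).__name__)
```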
@@ -377,36 +385,86 @@ class RLTrainingIntegrator:
 
 # Create a custom environment class that includes our reward function modification
 class EnhancedRLTradingEnvironment(RLTradingEnvironment):
-    def __init__(self, features_1m, features_5m, features_15m, window_size=20, trading_fee=0.0025, min_trade_interval=15):
-        super().__init__(features_1m, features_5m, features_15m, window_size, trading_fee, min_trade_interval)
+    def __init__(self, features_1m, features_5m, features_15m, window_size=20, trading_fee=0.0025, min_trade_interval=15, symbol='BTCUSDT'):
+        """Initialize the Enhanced RL trading environment with multi-timeframe support"""
+        # Store symbol explicitly for data interface to use
+        self.symbol = symbol
 
-        # Reference to integrator for tracking
-        self.integrator = None
-        # Store the original data for extrema analysis
-        self.original_data = None
-
-        # RNN signal integration
-        self.signal_interpreter = None
-        self.last_rnn_signals = []
-        self.rnn_signal_weight = 0.3  # Weight for RNN signals in decision making
-
-        # TensorBoard writer
-        self.writer = None
+        # Make sure features are all available and are numpy arrays
+        if features_1m is None or features_5m is None or features_15m is None:
+            raise ValueError("All timeframe features are required (1m, 5m, 15m)")
+
+        # Get 1h and 1d data from the DataInterface directly
+        try:
+            from NN.utils.data_interface import DataInterface
+            data_interface = DataInterface(symbol=self.symbol, timeframes=['1h', '1d'])
+
+            # Get 1h and 1d data
+            data_1h = data_interface.get_historical_data(timeframe='1h', n_candles=1000)
+            data_1d = data_interface.get_historical_data(timeframe='1d', n_candles=500)
+
+            # Add technical indicators
+            data_1h = data_interface.add_technical_indicators(data_1h)
+            data_1d = data_interface.add_technical_indicators(data_1d)
+
+            # Convert to numpy arrays
+            features_1h = np.hstack([
+                data_1h.drop(['timestamp', 'close'], axis=1).values,
+                data_1h['close'].values.reshape(-1, 1)
+            ])
+
+            features_1d = np.hstack([
+                data_1d.drop(['timestamp', 'close'], axis=1).values,
+                data_1d['close'].values.reshape(-1, 1)
+            ])
+        except Exception as e:
+            logger.error(f"Error loading 1h and 1d data: {str(e)}")
+            raise ValueError("Could not load required timeframe data (1h, 1d)")
+
+        # Convert features to numpy arrays if needed
+        features_1m_np = np.array(features_1m, dtype=np.float32) if not isinstance(features_1m, np.ndarray) else features_1m
+        features_1h_np = np.array(features_1h, dtype=np.float32) if not isinstance(features_1h, np.ndarray) else features_1h
+        features_1d_np = np.array(features_1d, dtype=np.float32) if not isinstance(features_1d, np.ndarray) else features_1d
+
+        # Initialize parent class with real data only
+        super().__init__(features_1m_np, features_1h_np, features_1d_np, window_size, trading_fee, min_trade_interval)
+
+        # Add enhanced state tracking
+        self.integrator = None
+        self.chart = None
+        self.writer = None
+        self.signal_interpreter = None
 
-        # Initialize improved reward calculator if available
+        # Add reward enhancement
         self.use_improved_reward = reward_calculator_available
         if self.use_improved_reward:
             self.reward_calculator = ImprovedRewardCalculator(
-                base_fee_rate=trading_fee,
-                max_frequency_penalty=0.005,
-                holding_reward_rate=0.0002,
-                risk_adjusted=True
+                base_reward=1.0,
+                profit_factor=2.0,  # Higher reward for profitable trades
+                loss_factor=1.0,  # Standard penalty for losses
+                trade_frequency_penalty=0.3,  # Penalty for frequent trading
+                position_duration_factor=0.05  # Small reward for longer positions
             )
-            logging.info("Using improved reward function with risk adjustment")
+            logger.info("Using improved reward calculator")
+        else:
+            logger.info("Using default reward function")
+
+        # Add advanced tracking metrics
+        self.unrealized_pnl = 0.0
+        self.best_reward = -np.inf
+        self.worst_reward = np.inf
+        self.rewards_history = []
+        self.actions_history = []
+        self.daily_pnl = {}
+        self.hourly_pnl = {}
+
+        # Use GPU if available for faster inference
+        self.use_gpu = torch.cuda.is_available()
+        if self.use_gpu:
+            logger.info("GPU available for trading environment")
 
     def set_integrator(self, integrator):
-        """Set reference to integrator for callbacks"""
+        """Set reference to integrator for UI control"""
         self.integrator = integrator
 
     def set_signal_interpreter(self, signal_interpreter):
@@ -419,188 +477,179 @@ class RLTrainingIntegrator:
 
     def _calculate_reward(self, action):
         """Override the reward calculation with our enhanced version"""
-        # Get current and next price
-        current_price = self.features_1m[self.current_step, -1]
-        next_price = self.features_1m[self.current_step + 1, -1]
-        # Get real market price if available (from integrator)
-        real_market_price = None
-        if self.integrator and hasattr(self.integrator, 'chart') and self.integrator.chart:
-            if hasattr(self.integrator.chart, 'tick_storage'):
-                real_market_price = self.integrator.chart.tick_storage.get_latest_price()
-
-        # Use actual market price if available, otherwise use the candle price
-        price_to_use = real_market_price if real_market_price else current_price
-
-        # Calculate price change and initial variables
-        price_change = 0
-        if self.integrator and self.integrator.entry_price:
-            price_change = (price_to_use - self.integrator.entry_price) / self.integrator.entry_price
-
-        # Calculate position held time
-        position_held_time = 0
-        if self.integrator and self.integrator.entry_time:
-            position_held_time = self.current_step - self.integrator.entry_time
-
-        # Determine if position is profitable
-        is_profitable = False
-        if price_change > 0:
-            is_profitable = True
-
-        # If using improved reward calculator
-        if self.use_improved_reward:
-            # Convert our action to the format expected by the reward calculator
-            # 0:BUY, 1:SELL, 2:HOLD -> For calculator it's the same
-            reward_calc_action = action
-
-            # Calculate reward using the improved calculator
-            reward = self.reward_calculator.calculate_reward(
-                action=reward_calc_action,
-                price_change=price_change,
-                position_held_time=position_held_time,
-                is_profitable=is_profitable
-            )
-
-            # Record the trade for frequency tracking
-            self.reward_calculator.record_trade(
-                timestamp=datetime.now(),
-                action=action,
-                price=price_to_use
-            )
-
-            # If we have a PnL result, record it
-            if action == 1 and self.integrator and self.integrator.current_position_size > 0:
-                pnl = price_change - (self.trading_fee * 2)  # Account for entry and exit fees
-                self.reward_calculator.record_pnl(pnl)
-
-            # Log the reward calculation
-            logging.debug(f"Improved reward for action {action}: {reward:.6f}")
-
-            return reward, price_change
-
-        # Default values if not using improved calculator
-        pnl = 0.0
-        reward = -0.0001  # Small negative reward to discourage excessive actions
-
-        # Calculate base reward based on position and price change
-        if action == 0:  # BUY
-            # Apply fee directly as negative reward to discourage excessive trading
-            reward -= self.trading_fee
-
-            # Check if we already have a position
-            if self.integrator and self.integrator.current_position_size > 0:
-                reward -= 0.002  # Additional penalty for trying to buy when already in position
-
-            # If RNN signal available, incorporate it
-            if self.signal_interpreter and len(self.last_rnn_signals) > 0:
-                last_signal = self.last_rnn_signals[-1]
-                if last_signal['action'] == 'BUY':
-                    # RNN also suggests BUY - boost reward
-                    reward += 0.003 * self.rnn_signal_weight * last_signal.get('confidence', 1.0)
-                elif last_signal['action'] == 'SELL':
-                    # RNN suggests opposite - reduce reward
-                    reward -= 0.003 * self.rnn_signal_weight * last_signal.get('confidence', 1.0)
-
-        elif action == 1:  # SELL
-            if self.integrator and self.integrator.current_position_size > 0:
-                # Calculate potential profit/loss
-                if self.integrator.entry_price:
-                    price_to_use = real_market_price if real_market_price else current_price
-                    pnl = (price_to_use - self.integrator.entry_price) / self.integrator.entry_price
-
-                    # Base reward on actual PnL
-                    reward = pnl * 10
-
-                    # Apply fee as negative component
-                    reward -= self.trading_fee
-
-                    # If RNN signal available, incorporate it
-                    if self.signal_interpreter and len(self.last_rnn_signals) > 0:
-                        last_signal = self.last_rnn_signals[-1]
-                        if last_signal['action'] == 'SELL':
-                            # RNN also suggests SELL - boost reward
-                            reward += 0.003 * self.rnn_signal_weight * last_signal.get('confidence', 1.0)
-                        elif last_signal['action'] == 'BUY':
-                            # RNN suggests opposite - reduce reward
-                            reward -= 0.003 * self.rnn_signal_weight * last_signal.get('confidence', 1.0)
-            else:
-                # No position to sell - penalize
-                reward = -0.005
-
-        elif action == 2:  # HOLD
-            # Check if we're holding a profitable position
-            if self.integrator and self.integrator.current_position_size > 0 and self.integrator.entry_price:
-                price_to_use = real_market_price if real_market_price else current_price
-                pnl = (price_to_use - self.integrator.entry_price) / self.integrator.entry_price
-
-                # Encourage holding profitable positions
-                if pnl > 0:
-                    reward = 0.0001 * pnl * 5  # Small positive reward for holding winner
-
-                    # If position is very profitable, increase hold reward
-                    if pnl > 0.01:  # Over 1% profit
-                        reward *= 2
-                else:
-                    # Small negative reward for holding losing position
-                    reward = -0.0001 * abs(pnl) * 2
-
-            # If RNN signal suggests HOLD, add small reward
-            if self.signal_interpreter and len(self.last_rnn_signals) > 0:
-                last_signal = self.last_rnn_signals[-1]
-                if last_signal['action'] == 'HOLD':
-                    reward += 0.0001 * self.rnn_signal_weight
-
-        # Add price to history - use real market price if available
-        if real_market_price is not None:
-            # For extrema detection, use a normalized version of the real price
-            # to keep scale consistent with the model's price history
-            self.integrator.price_history.append(current_price)
-        else:
-            self.integrator.price_history.append(current_price)
-
-        # Apply extrema-based reward modifications
-        if len(self.integrator.price_history) > 20:
-            # Detect local extrema
-            tops_indices, bottoms_indices = self.integrator.extrema_detector.find_extrema(
-                self.integrator.price_history
-            )
-
-            # Get current price and market context
-            current_price = self.integrator.price_history[-1]
-
-            # Check if we're near a local extrema (top or bottom)
-            is_near_bottom = any(i > len(self.integrator.price_history) - 5 for i in bottoms_indices)
-            is_near_top = any(i > len(self.integrator.price_history) - 5 for i in tops_indices)
-
-            # Modify reward based on action and extrema
-            if action == 0 and is_near_bottom:  # BUY near bottom
-                logger.info("Buying near local bottom - adding bonus reward")
-                reward += 0.015  # Significant bonus
-            elif action == 0 and is_near_top:  # BUY near top
-                logger.info("Buying near local top - applying penalty")
-                reward -= 0.01  # Penalty
-            elif action == 1 and is_near_top:  # SELL near top
-                logger.info("Selling near local top - adding bonus reward")
-                reward += 0.015  # Significant bonus
-            elif action == 1 and is_near_bottom:  # SELL near bottom
-                logger.info("Selling near local bottom - applying penalty")
-                reward -= 0.01  # Penalty
-            elif action == 2:  # HOLD
-                if is_near_bottom and self.integrator.current_position_size > 0:
-                    # Good to hold if we have positions at bottom
-                    reward += 0.002  # Small bonus
-                elif is_near_top and self.integrator.current_position_size == 0:
-                    # Good to hold if we have no positions at top
-                    reward += 0.002  # Small bonus
-
-        # Limit extreme rewards
-        reward = max(min(reward, 0.5), -0.5)
-
-        return reward, pnl
+        try:
+            # Get current and next price
+            current_price = self.features_1m[self.current_step, -1]
+            next_price = self.features_1m[min(self.current_step + 1, len(self.features_1m) - 1), -1]
+
+            # Get real market price if available (from integrator)
+            real_market_price = None
+            if self.integrator and hasattr(self.integrator, 'chart') and self.integrator.chart:
+                if hasattr(self.integrator.chart, 'tick_storage'):
+                    real_market_price = self.integrator.chart.tick_storage.get_latest_price()
+
+            # Use actual market price if available, otherwise use the candle price
+            price_to_use = real_market_price if real_market_price else current_price
+
+            # Calculate price change and initial variables
+            price_change = 0
+            if self.integrator and self.integrator.entry_price:
+                price_change = (price_to_use - self.integrator.entry_price) / self.integrator.entry_price
+
+            # Calculate position held time
+            position_held_time = 0
+            if self.integrator and self.integrator.entry_time:
+                position_held_time = self.current_step - self.integrator.entry_time
+
+            # Determine if position is profitable
+            is_profitable = price_change > 0
+
+            # If using improved reward calculator
+            if self.use_improved_reward:
+                # Convert our action to the format expected by the reward calculator
+                # 0:BUY, 1:SELL, 2:HOLD -> For calculator it's the same
+                reward_calc_action = action
+
+                # Calculate reward using the improved calculator
+                reward = self.reward_calculator.calculate_reward(
+                    action=reward_calc_action,
+                    price_change=price_change,
+                    position_held_time=position_held_time,
+                    is_profitable=is_profitable
+                )
+
+                # Record the trade for frequency tracking
+                self.reward_calculator.record_trade(
+                    timestamp=datetime.now(),
+                    action=action,
+                    price=price_to_use
+                )
+
+                # If we have a PnL result, record it
+                if action == 1 and self.integrator and self.integrator.current_position_size > 0:
+                    pnl = price_change - (self.trading_fee * 2)  # Account for entry and exit fees
+                    self.reward_calculator.record_pnl(pnl)
+
+                # Log the reward calculation
+                logging.debug(f"Improved reward for action {action}: {reward:.6f}")
+
+                return reward, price_change
+
+            # Default values if not using improved calculator
+            pnl = 0.0
+            reward = 0.0
+
+            # Simplified reward calculation based on action and price change
+            if action == 0:  # BUY
+                # Reward for buying if price goes up, penalty if it goes down
+                future_return = (next_price - current_price) / current_price
+                reward = future_return * 100  # Scale the reward for better learning
+                pnl = future_return
+
+            elif action == 1:  # SELL
+                # Reward for selling if price goes down, penalty if it goes up
+                future_return = (current_price - next_price) / current_price
+                reward = future_return * 100  # Scale the reward for better learning
+                pnl = future_return
+
+            else:  # HOLD
+                # Small penalty for holding to encourage action
+                reward = -0.01
+                pnl = 0
+
+            # Record metrics for the reward and action
+            self.rewards_history.append(reward)
+            self.actions_history.append(action)
+
+            # Update best/worst reward
+            self.best_reward = max(self.best_reward, reward)
+            self.worst_reward = min(self.worst_reward, reward)
+
+            # Record to TensorBoard if available
+            if self.writer:
+                self.writer.add_scalar(f'action/reward_{action}', reward, self.current_step)
+
+            return reward, pnl
+
+        except Exception as e:
+            logger.error(f"Error in reward calculation: {str(e)}")
+            import traceback
+            logger.error(traceback.format_exc())
+            # Return safe default values
+            return 0.0, 0.0
+
+    def step(self, action):
+        """Override step to add additional features"""
+        try:
+            # Call parent step method
+            next_state, reward, done, info = super().step(action)
+
+            # Add additional metrics to info
+            if hasattr(self, 'best_reward'):
+                info['best_reward'] = self.best_reward
+                info['worst_reward'] = self.worst_reward
+
+            # Get action distribution if we have enough history
+            if len(self.actions_history) >= 10:
+                action_counts = np.bincount(self.actions_history[-10:], minlength=3)
+                action_pcts = action_counts / sum(action_counts)
+                info['action_distribution'] = action_pcts
+
+            # Update TensorBoard metrics
+            if self.writer:
+                self.writer.add_scalar('metrics/balance', self.balance, self.current_step)
+                self.writer.add_scalar('metrics/position', self.position, self.current_step)
+
+                # Track win rate if we have trades
+                if self.trades > 0:
+                    win_rate = self.wins / self.trades
+                    self.writer.add_scalar('metrics/win_rate', win_rate, self.current_step)
+
+            return next_state, reward, done, info
+
+        except Exception as e:
+            logger.error(f"Error in environment step: {str(e)}")
+            import traceback
+            logger.error(traceback.format_exc())
+            # Return safe default values in case of error
+            return self._get_observation(), 0.0, True, {}
 
 # Create a custom environment class factory
 def create_enhanced_env(features_1m, features_5m, features_15m):
-    env = EnhancedRLTradingEnvironment(features_1m, features_5m, features_15m)
+    # Ensure we have all required timeframes
+    if features_1m is None or features_5m is None or features_15m is None:
+        raise ValueError("All timeframe features are required (1m, 5m, 15m)")
+
+    # Get 1h and 1d data from the DataInterface directly
+    try:
+        from NN.utils.data_interface import DataInterface
+        data_interface = DataInterface(symbol=self.symbol, timeframes=['1h', '1d'])
+
+        # Get 1h and 1d data
+        data_1h = data_interface.get_historical_data(timeframe='1h', n_candles=1000)
+        data_1d = data_interface.get_historical_data(timeframe='1d', n_candles=500)
+
+        # Add technical indicators
+        data_1h = data_interface.add_technical_indicators(data_1h)
+        data_1d = data_interface.add_technical_indicators(data_1d)
+
+        # Convert to numpy arrays
+        features_1h = np.hstack([
+            data_1h.drop(['timestamp', 'close'], axis=1).values,
+            data_1h['close'].values.reshape(-1, 1)
+        ])
+
+        features_1d = np.hstack([
+            data_1d.drop(['timestamp', 'close'], axis=1).values,
+            data_1d['close'].values.reshape(-1, 1)
+        ])
+    except Exception as e:
+        logger.error(f"Error loading 1h and 1d data: {str(e)}")
+        raise ValueError("Could not load required timeframe data (1h, 1d)")
+
+    # Create environment with all real data timeframes
+    env = EnhancedRLTradingEnvironment(features_1m, features_5m, features_15m, symbol=self.symbol)
+
     # Set the integrator after creation
     env.integrator = self
     # Set the chart from the integrator
@@ -906,9 +955,15 @@ class RLTrainingIntegrator:
             # Log episode results
             logger.info(f"Episode {episode} completed")
             logger.info(f"  Total reward: {reward:.4f}")
-            logger.info(f"  PnL: {info['gain']:.4f}")
-            logger.info(f"  Win rate: {info['win_rate']:.4f}")
-            logger.info(f"  Trades: {info['trades']}")
+            # Check if info contains the expected keys, provide defaults if missing
+            gain = info.get('gain', 0.0)
+            win_rate = info.get('win_rate', 0.0)
+            trades = info.get('trades', 0)
+
+            logger.info(f"  PnL: {gain:.4f}")
+            logger.info(f"  Win rate: {win_rate:.4f}")
+            logger.info(f"  Trades: {trades}")
+
             # Log session-wide PnL
             session_win_rate = self.session_wins / self.session_trades if self.session_trades > 0 else 0
@@ -1004,8 +1059,18 @@ async def start_realtime_chart(symbol="ETH/USDT", port=8050, manual_mode=False):
 
     try:
         logger.info(f"Initializing RealTimeChart for {symbol}")
-        # Create the chart with the simplified constructor
-        chart = RealTimeChart(symbol)
+        # Create the chart with proper parameters to ensure initialization works
+        chart = RealTimeChart(
+            app=None,  # Create its own Dash app
+            symbol=symbol,
+            timeframe='1m',
+            standalone=True,
+            chart_title=f"{symbol} Realtime Trading Chart",
+            debug_mode=True,
+            port=port,
+            show_volume=True,
+            show_indicators=True
+        )
+
         # Add backward compatibility methods
         chart.add_trade = lambda price, timestamp, amount, pnl=0.0, action="BUY": _add_trade_compat(chart, price, timestamp, amount, pnl, action)
@@ -1049,7 +1114,7 @@ def _add_trade_compat(chart, price, timestamp, amount, pnl=0.0, action="BUY"):
         entry_price=price,
         amount=amount,
         timestamp=timestamp,
-        fee_rate=0.001  # 0.1% fee rate
+        fee_rate=0.0002  # 0.02% fee rate
     )
 
     # Track this trade for rate calculation
@@ -1109,6 +1174,8 @@ def run_training_thread(chart, num_episodes=5000, skip_training=False, max_posit
     def training_thread_func():
         """Training thread function"""
         try:
+            global _agent_instance
+
             # Create the integrator object
             integrator = RLTrainingIntegrator(
                 chart=chart,
@@ -1132,6 +1199,8 @@ def run_training_thread(chart, num_episodes=5000, skip_training=False, max_posit
                 if agent:
                     logger.info("Loaded pre-trained agent")
                     integrator.agent = agent
+                    # Store agent instance for external access
+                    _agent_instance = agent
                 else:
                     logger.warning("No pre-trained agent found")
             else:
@@ -1142,6 +1211,9 @@ def run_training_thread(chart, num_episodes=5000, skip_training=False, max_posit
                 # Use a small number of episodes to test termination handling
                 logger.info(f"Starting training with {num_episodes} episodes and max_position={max_position}")
                 integrator.run_training(episodes=num_episodes, max_steps=2000)
+
+                # Store agent instance for external access
+                _agent_instance = integrator.agent
         except Exception as e:
             logger.error(f"Error in training thread: {str(e)}")
             import traceback
@@ -1213,6 +1285,7 @@ async def main():
     logger.info(f"Starting realtime chart with {'manual mode' if args.manual_trades else 'auto mode'}")
     chart, websocket_task = await start_realtime_chart(
         symbol="ETH/USDT",
+        port=8050,
         manual_mode=args.manual_trades
     )
 
@@ -1281,6 +1354,14 @@ if __name__ == "__main__":
     console.setFormatter(formatter)
     logging.getLogger('').addHandler(console)
 
+    # Print prominent warning about synthetic data
+    logger.warning("===========================================================")
+    logger.warning("IMPORTANT: ONLY REAL MARKET DATA IS SUPPORTED")
+    logger.warning("This system does NOT use synthetic data for training or inference")
+    logger.warning("All timeframes (1m, 5m, 15m, 1h, 1d) must be available as real data")
+    logger.warning("See REAL_MARKET_DATA_POLICY.md for more information")
+    logger.warning("===========================================================")
+
     logger.info("Starting RL training with real-time visualization")
     logger.info(f"Episodes: {args.episodes}")
     logger.info(f"No-train: {args.no_train}")
Binary file not shown. (Before: 159 KiB | After: 75 KiB)

49 utils/README.md Normal file
@@ -0,0 +1,49 @@
# Utility Scripts

This directory contains utility scripts for managing the trading bot development environment.

## Port Management Utility

The `port_manager.py` script provides utilities for managing ports and killing stale processes:

```bash
# Kill all stale debug instances
python utils/port_manager.py --kill-stale

# Free a specific port
python utils/port_manager.py --free-port 6007

# Find an available port in a range
python utils/port_manager.py --find-port --min-port 6000 --max-port 7000

# Try to use a preferred port with fallback
python utils/port_manager.py --preferred-port 6007 --min-port 6000 --max-port 7000
```

## TensorBoard Launcher

The `launch_tensorboard.py` script launches TensorBoard with automatic port management:

```bash
# Launch TensorBoard with default settings
python utils/launch_tensorboard.py

# Launch with custom log directory and port range
python utils/launch_tensorboard.py --logdir=path/to/logs --preferred-port=6007 --port-range=6000-7000

# Launch and kill stale processes first
python utils/launch_tensorboard.py --kill-stale
```

## Integration with VSCode Tasks

These utilities are integrated with VSCode tasks in `.vscode/tasks.json`:

1. **Start TensorBoard**: Launches TensorBoard with automatic port management
2. **Kill Stale Processes**: Kills all stale debug instances

You can run these tasks from the VSCode command palette (Ctrl+Shift+P) by typing "Tasks: Run Task" and selecting the task.

## Requirements

These utilities require the `psutil` package, which is included in the project's `requirements.txt` file.
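Both utilities can also be used programmatically. A minimal sketch using only the functions exported by `utils` (the ports and log directory below are just examples):

```python
from utils.port_manager import get_port_with_fallback, kill_stale_debug_instances, is_port_in_use
from utils.launch_tensorboard import launch_tensorboard

# Clean up leftover debug/training processes, then pick a port (prefer 6007, else 6000-7000)
count, _ = kill_stale_debug_instances()
port = get_port_with_fallback(6007, 6000, 7000)
print(f"Killed {count} stale processes; port {port} in use: {is_port_in_use(port)}")

# Start TensorBoard on the chosen port and stop it later
process = launch_tensorboard(logdir="NN/models/saved/logs", port=port, open_browser=False)
# ... work with TensorBoard ...
process.terminate()
```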
19 utils/__init__.py Normal file
@@ -0,0 +1,19 @@
"""
Utility functions for port management, launching services, and debug tools.
"""

from utils.port_manager import (
    is_port_in_use,
    find_available_port,
    kill_process_by_port,
    kill_stale_debug_instances,
    get_port_with_fallback
)

__all__ = [
    'is_port_in_use',
    'find_available_port',
    'kill_process_by_port',
    'kill_stale_debug_instances',
    'get_port_with_fallback'
]

164 utils/launch_tensorboard.py Normal file
@@ -0,0 +1,164 @@
#!/usr/bin/env python3
"""
TensorBoard Launcher with Automatic Port Management

This script launches TensorBoard with automatic port fallback if the preferred port is in use.
It also kills any stale debug instances that might be running.

Usage:
    python launch_tensorboard.py --logdir=path/to/logs --preferred-port=6007 --port-range=6000-7000
"""

import os
import sys
import subprocess
import argparse
import logging
from pathlib import Path

# Add project root to path
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if project_root not in sys.path:
    sys.path.append(project_root)

from utils.port_manager import get_port_with_fallback, kill_stale_debug_instances

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('tensorboard_launcher')

def launch_tensorboard(logdir, port, host='localhost', open_browser=True):
    """
    Launch TensorBoard on the specified port

    Args:
        logdir (str): Path to log directory
        port (int): Port to use
        host (str): Host to bind to
        open_browser (bool): Whether to open browser automatically

    Returns:
        subprocess.Popen: Process object
    """
    cmd = [
        sys.executable, "-m", "tensorboard.main",
        f"--logdir={logdir}",
        f"--port={port}",
        f"--host={host}"
    ]

    # Add --load_fast=false to improve startup times
    cmd.append("--load_fast=false")

    # Control whether to open browser
    if not open_browser:
        cmd.append("--window_title=TensorBoard")

    logger.info(f"Launching TensorBoard: {' '.join(cmd)}")

    # Use subprocess.Popen to start TensorBoard without waiting for it to finish
    process = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        bufsize=1
    )

    # Log the first few lines of output to confirm it's starting correctly
    line_count = 0
    for line in process.stdout:
        logger.info(f"TensorBoard: {line.strip()}")
        line_count += 1

        # Check if TensorBoard has started successfully
        if "TensorBoard" in line and "http://" in line:
            url = line.strip().split("http://")[1].split(" ")[0]
            logger.info(f"TensorBoard available at: http://{url}")

        # Only log the first few lines
        if line_count >= 10:
            break

    # Continue reading output in background to prevent pipe from filling
    def read_output():
        for line in process.stdout:
            pass

    import threading
    threading.Thread(target=read_output, daemon=True).start()

    return process

def main():
    parser = argparse.ArgumentParser(description='Launch TensorBoard with automatic port management')
    parser.add_argument('--logdir', type=str, default='NN/models/saved/logs',
                        help='Directory containing TensorBoard event files')
    parser.add_argument('--preferred-port', type=int, default=6007,
                        help='Preferred port to use')
    parser.add_argument('--port-range', type=str, default='6000-7000',
                        help='Port range to try if preferred port is unavailable (format: min-max)')
    parser.add_argument('--host', type=str, default='localhost',
                        help='Host to bind to')
    parser.add_argument('--no-browser', action='store_true',
                        help='Do not open browser automatically')
    parser.add_argument('--kill-stale', action='store_true',
                        help='Kill stale debug instances before starting')

    args = parser.parse_args()

    # Parse port range
    try:
        min_port, max_port = map(int, args.port_range.split('-'))
    except ValueError:
        logger.error(f"Invalid port range format: {args.port_range}. Use format: min-max")
        return 1

    # Kill stale instances if requested
    if args.kill_stale:
        logger.info("Killing stale debug instances...")
        count, _ = kill_stale_debug_instances()
        logger.info(f"Killed {count} stale instances")

    # Get an available port
    try:
        port = get_port_with_fallback(args.preferred_port, min_port, max_port)
        logger.info(f"Using port {port} for TensorBoard")
    except RuntimeError as e:
        logger.error(str(e))
        return 1

    # Ensure log directory exists
    logdir = os.path.abspath(args.logdir)
    os.makedirs(logdir, exist_ok=True)

    # Launch TensorBoard
    process = launch_tensorboard(
        logdir=logdir,
        port=port,
        host=args.host,
        open_browser=not args.no_browser
    )

    # Wait for process to end (it shouldn't unless there's an error or user kills it)
    try:
        return_code = process.wait()
        if return_code != 0:
            logger.error(f"TensorBoard exited with code {return_code}")
            return return_code
    except KeyboardInterrupt:
        logger.info("Received keyboard interrupt, shutting down TensorBoard...")
        process.terminate()
        try:
            process.wait(timeout=5)
        except subprocess.TimeoutExpired:
            logger.warning("TensorBoard didn't terminate gracefully, forcing kill")
            process.kill()

    return 0

if __name__ == "__main__":
    sys.exit(main())

238 utils/port_manager.py Normal file
@ -0,0 +1,238 @@
#!/usr/bin/env python3
"""
Port Management Utility

This script provides utilities to:
1. Find available ports in a specified range
2. Kill stale processes running on specific ports
3. Kill all debug/training instances

Usage:
- As a module: import port_manager and use its functions
- Directly: python port_manager.py --kill-stale --min-port 6000 --max-port 7000
"""

import os
import sys
import socket
import argparse
import psutil
import logging
import time
import signal
from typing import List, Tuple, Optional, Set

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger('port_manager')

# Define process names to look for when killing stale instances
DEBUG_PROCESS_KEYWORDS = [
    'tensorboard',
    'python train_',
    'realtime.py',
    'train_rl_with_realtime.py'
]

def is_port_in_use(port: int) -> bool:
    """
    Check if a port is in use

    Args:
        port (int): Port number to check

    Returns:
        bool: True if port is in use, False otherwise
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0

def find_available_port(start_port: int, end_port: int) -> Optional[int]:
    """
    Find an available port in the specified range

    Args:
        start_port (int): Lower bound of port range
        end_port (int): Upper bound of port range

    Returns:
        Optional[int]: Available port number or None if no ports available
    """
    for port in range(start_port, end_port + 1):
        if not is_port_in_use(port):
            return port
    return None

def get_process_by_port(port: int) -> List[psutil.Process]:
    """
    Get processes using a specific port

    Args:
        port (int): Port number to check

    Returns:
        List[psutil.Process]: List of processes using the port
    """
    processes = []
    for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
        try:
            for conn in proc.connections(kind='inet'):
                if conn.laddr.port == port:
                    processes.append(proc)
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass
    return processes

def kill_process_by_port(port: int) -> Tuple[int, List[str]]:
    """
    Kill processes using a specific port

    Args:
        port (int): Port number to check

    Returns:
        Tuple[int, List[str]]: Count of killed processes and their names
    """
    processes = get_process_by_port(port)
    killed = []

    for proc in processes:
        try:
            proc_name = " ".join(proc.cmdline()) if proc.cmdline() else proc.name()
            logger.info(f"Terminating process {proc.pid}: {proc_name}")
            proc.terminate()
            killed.append(proc_name)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            pass

    # Give processes time to terminate gracefully
    if processes:
        time.sleep(0.5)

    # Force kill any remaining processes
    for proc in processes:
        try:
            if proc.is_running():
                logger.info(f"Force killing process {proc.pid}")
                proc.kill()
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            pass

    return len(killed), killed

def kill_stale_debug_instances() -> Tuple[int, Set[str]]:
    """
    Kill all stale debug and training instances based on process names

    Returns:
        Tuple[int, Set[str]]: Count of killed processes and their names
    """
    killed_count = 0
    killed_procs = set()

    for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
        try:
            cmd = " ".join(proc.cmdline()) if proc.cmdline() else proc.name()

            # Check if this is a debug/training process we should kill
            if any(keyword in cmd for keyword in DEBUG_PROCESS_KEYWORDS):
                logger.info(f"Terminating stale process {proc.pid}: {cmd}")
                proc.terminate()
                killed_count += 1
                killed_procs.add(cmd)
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass

    # Give processes time to terminate
    if killed_count > 0:
        time.sleep(1)

    # Force kill any remaining processes
    for proc in psutil.process_iter(['pid', 'name', 'cmdline']):
        try:
            cmd = " ".join(proc.cmdline()) if proc.cmdline() else proc.name()

            if any(keyword in cmd for keyword in DEBUG_PROCESS_KEYWORDS) and proc.is_running():
                logger.info(f"Force killing stale process {proc.pid}")
                proc.kill()
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass

    return killed_count, killed_procs

def get_port_with_fallback(preferred_port: int, min_port: int, max_port: int) -> int:
    """
    Try to use preferred port, fall back to any available port in range

    Args:
        preferred_port (int): Preferred port to use
        min_port (int): Minimum port in fallback range
        max_port (int): Maximum port in fallback range

    Returns:
        int: Available port number
    """
    # First try the preferred port
    if not is_port_in_use(preferred_port):
        return preferred_port

    # If preferred port is in use, try to free it
    logger.info(f"Preferred port {preferred_port} is in use, attempting to free it")
    kill_count, _ = kill_process_by_port(preferred_port)

    if kill_count > 0 and not is_port_in_use(preferred_port):
        logger.info(f"Successfully freed port {preferred_port}")
        return preferred_port

    # If we couldn't free the preferred port, find another available port
    logger.info(f"Looking for available port in range {min_port}-{max_port}")
    available_port = find_available_port(min_port, max_port)

    if available_port:
        logger.info(f"Using alternative port: {available_port}")
        return available_port
    else:
        # If no ports are available, force kill processes in the entire range
        logger.warning(f"No available ports in range {min_port}-{max_port}, freeing ports")
        for port in range(min_port, max_port + 1):
            kill_process_by_port(port)

        # Try again
        available_port = find_available_port(min_port, max_port)
        if available_port:
            logger.info(f"Using port {available_port} after freeing")
            return available_port
        else:
            logger.error(f"Could not find available port even after freeing range {min_port}-{max_port}")
            raise RuntimeError(f"No available ports in range {min_port}-{max_port}")

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Port management utility')
    parser.add_argument('--kill-stale', action='store_true', help='Kill all stale debug instances')
    parser.add_argument('--free-port', type=int, help='Free a specific port')
    parser.add_argument('--find-port', action='store_true', help='Find an available port')
    parser.add_argument('--min-port', type=int, default=6000, help='Minimum port in range')
    parser.add_argument('--max-port', type=int, default=7000, help='Maximum port in range')
    parser.add_argument('--preferred-port', type=int, help='Preferred port to use')

    args = parser.parse_args()

    if args.kill_stale:
        count, procs = kill_stale_debug_instances()
        logger.info(f"Killed {count} stale processes")
        for proc in procs:
            logger.info(f" - {proc}")

    if args.free_port:
        count, killed = kill_process_by_port(args.free_port)
        logger.info(f"Killed {count} processes using port {args.free_port}")
        for proc in killed:
            logger.info(f" - {proc}")

    if args.find_port or args.preferred_port:
        preferred = args.preferred_port if args.preferred_port else args.min_port
        port = get_port_with_fallback(preferred, args.min_port, args.max_port)
        print(port)  # Print only the port number for easy capture in scripts
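As a usage sketch for the module interface mentioned in the docstring: the function names below match the definitions in this file, but the calling script is hypothetical and assumes utils/ is importable as a package (otherwise add the directory to sys.path and import port_manager directly).

# hypothetical caller, e.g. a training script that needs a free metrics port
from utils.port_manager import get_port_with_fallback, kill_stale_debug_instances

killed, names = kill_stale_debug_instances()      # clear old tensorboard/train_* processes
port = get_port_with_fallback(6007, 6000, 7000)   # prefer 6007, else first free port in 6000-7000
print(f"Serving metrics on port {port} ({killed} stale processes killed)")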