I think we fixed the MEXC interface at the end!
@@ -65,11 +65,19 @@ class MEXCInterface(ExchangeInterface):
         return False

     def _format_spot_symbol(self, symbol: str) -> str:
-        """Formats a symbol to MEXC spot API standard (e.g., 'ETH/USDT' -> 'ETHUSDT')."""
+        """Formats a symbol to MEXC spot API standard (e.g., 'ETH/USDT' -> 'ETHUSDC')."""
         if '/' in symbol:
             base, quote = symbol.split('/')
+            # Convert USDT to USDC for MEXC spot trading
+            if quote.upper() == 'USDT':
+                quote = 'USDC'
             return f"{base.upper()}{quote.upper()}"
-        return symbol.upper()
+        else:
+            # Convert USDT to USDC for symbols like ETHUSDT
+            symbol = symbol.upper()
+            if symbol.endswith('USDT'):
+                symbol = symbol.replace('USDT', 'USDC')
+            return symbol

     def _format_futures_symbol(self, symbol: str) -> str:
         """Formats a symbol to MEXC futures API standard (e.g., 'ETH/USDT' -> 'ETH_USDT')."""
@@ -77,22 +85,37 @@ class MEXCInterface(ExchangeInterface):
         return symbol.replace('/', '_').upper()

     def _generate_signature(self, timestamp: str, method: str, endpoint: str, params: Dict[str, Any]) -> str:
-        """Generate signature for private API calls"""
-        # Build the string to sign
-        sign_str = self.api_key + timestamp
-        if params:
-            # Append all parameters sorted by key, without URL encoding for signature
-            query_str = "&".join([f"{k}={v}" for k, v in sorted(params.items()) if k != 'signature'])
-            if query_str:
-                sign_str += query_str
+        """Generate signature for private API calls using MEXC's expected parameter order"""
+        # MEXC requires specific parameter ordering, not alphabetical
+        # Based on successful test: symbol, side, type, quantity, timestamp, then other params
+        mexc_param_order = ['symbol', 'side', 'type', 'quantity', 'timestamp', 'recvWindow']
+
+        # Build ordered parameter list
+        ordered_params = []
+
+        # Add parameters in MEXC's expected order
+        for param_name in mexc_param_order:
+            if param_name in params and param_name != 'signature':
+                ordered_params.append(f"{param_name}={params[param_name]}")
+
+        # Add any remaining parameters not in the standard order (alphabetically)
+        remaining_params = {k: v for k, v in params.items() if k not in mexc_param_order and k != 'signature'}
+        for key in sorted(remaining_params.keys()):
+            ordered_params.append(f"{key}={remaining_params[key]}")
+
+        # Create query string (MEXC doesn't use the api_key + timestamp prefix)
+        query_string = '&'.join(ordered_params)
+
+        logger.debug(f"MEXC signature query string: {query_string}")

-        logger.debug(f"Signature string: {sign_str}")
         # Generate HMAC SHA256 signature
         signature = hmac.new(
             self.api_secret.encode('utf-8'),
-            sign_str.encode('utf-8'),
+            query_string.encode('utf-8'),
             hashlib.sha256
         ).hexdigest()

         logger.debug(f"MEXC signature: {signature}")
         return signature

     def _send_public_request(self, method: str, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
@@ -139,24 +162,26 @@ class MEXCInterface(ExchangeInterface):
             "Request-Time": timestamp
         }

-        # Ensure endpoint does not start with a slash to avoid double slashes
-        if endpoint.startswith('/'):
-            endpoint = endpoint.lstrip('/')
+        # For spot API, use the correct endpoint format
+        if not endpoint.startswith('api/v3/'):
+            endpoint = f"api/v3/{endpoint}"
         url = f"{self.base_url}/{endpoint}"
         try:
             if method.upper() == "GET":
                 response = self.session.get(url, headers=headers, params=params, timeout=10)
             elif method.upper() == "POST":
                 headers["Content-Type"] = "application/x-www-form-urlencoded"
-                response = self.session.post(url, headers=headers, data=params, timeout=10)
+                # MEXC expects POST parameters as query string, not in body
+                response = self.session.post(url, headers=headers, params=params, timeout=10)
             else:
                 logger.error(f"Unsupported method: {method}")
                 return None

             response.raise_for_status()
             data = response.json()
-            if data.get('success', False):
-                return data.get('data', data)
+            # For successful responses, return the data directly
+            # MEXC doesn't always use 'success' field for successful operations
+            if response.status_code == 200:
+                return data
             else:
                 logger.error(f"API error: Status Code: {response.status_code}, Response: {response.text}")
                 return None
@@ -170,7 +195,7 @@ class MEXCInterface(ExchangeInterface):

     def get_account_info(self) -> Dict[str, Any]:
         """Get account information"""
-        endpoint = "/api/v3/account"
+        endpoint = "account"
         result = self._send_private_request("GET", endpoint, {})
         return result if result is not None else {}

@@ -235,9 +260,39 @@ class MEXCInterface(ExchangeInterface):
         logger.error(f"Failed to get ticker for {symbol}")
         return None

+    def get_api_symbols(self) -> List[str]:
+        """Get list of symbols supported for API trading"""
+        try:
+            endpoint = "selfSymbols"
+            result = self._send_private_request("GET", endpoint, {})
+            if result and 'data' in result:
+                return result['data']
+            elif isinstance(result, list):
+                return result
+            else:
+                logger.warning(f"Unexpected response format for API symbols: {result}")
+                return []
+        except Exception as e:
+            logger.error(f"Error getting API symbols: {e}")
+            return []
+
+    def is_symbol_supported(self, symbol: str) -> bool:
+        """Check if a symbol is supported for API trading"""
+        formatted_symbol = self._format_spot_symbol(symbol)
+        supported_symbols = self.get_api_symbols()
+        return formatted_symbol in supported_symbols
+
     def place_order(self, symbol: str, side: str, order_type: str, quantity: float, price: Optional[float] = None) -> Dict[str, Any]:
         """Place a new order on MEXC."""
         formatted_symbol = self._format_spot_symbol(symbol)

+        # Check if symbol is supported for API trading
+        if not self.is_symbol_supported(symbol):
+            supported_symbols = self.get_api_symbols()
+            logger.error(f"Symbol {formatted_symbol} is not supported for API trading")
+            logger.info(f"Supported symbols include: {supported_symbols[:10]}...")  # Show first 10
+            return {}
+
         endpoint = "order"

         params: Dict[str, Any] = {
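For anyone auditing the signing change above: a minimal standalone sketch of the scheme the rewritten _generate_signature implements (MEXC's fixed parameter order first, remaining keys alphabetically, HMAC-SHA256 over the query string). The secret and parameter values here are invented placeholders, not real credentials.

import hashlib
import hmac

MEXC_PARAM_ORDER = ['symbol', 'side', 'type', 'quantity', 'timestamp', 'recvWindow']

def sign(params: dict, api_secret: str) -> str:
    # Known keys first, in MEXC's expected order; any extras follow alphabetically.
    ordered = [f"{k}={params[k]}" for k in MEXC_PARAM_ORDER if k in params]
    ordered += [f"{k}={params[k]}" for k in sorted(params)
                if k not in MEXC_PARAM_ORDER and k != 'signature']
    query_string = '&'.join(ordered)
    # HMAC-SHA256 of the query string alone (no api_key + timestamp prefix).
    return hmac.new(api_secret.encode('utf-8'),
                    query_string.encode('utf-8'),
                    hashlib.sha256).hexdigest()

print(sign({'symbol': 'ETHUSDC', 'side': 'BUY', 'type': 'MARKET',
            'quantity': 0.01, 'timestamp': 1719871200000}, 'dummy-secret'))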
@@ -1,285 +1,15 @@
 {
-  "example_cnn": [
-    {
-      "checkpoint_id": "example_cnn_20250624_213913",
-      "model_name": "example_cnn",
-      "model_type": "cnn",
-      "file_path": "NN\\models\\saved\\example_cnn\\example_cnn_20250624_213913.pt",
-      "created_at": "2025-06-24T21:39:13.559926",
-      "file_size_mb": 0.0797882080078125,
-      "performance_score": 65.67219525381417,
-      "accuracy": 0.28019601724789606,
-      "loss": 1.9252885885630378,
-      "val_accuracy": 0.21531048803825983,
-      "val_loss": 1.953166686238386,
-      "reward": null,
-      "pnl": null,
-      "epoch": 1,
-      "training_time_hours": 0.1,
-      "total_parameters": 20163,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "example_cnn_20250624_213913",
-      "model_name": "example_cnn",
-      "model_type": "cnn",
-      "file_path": "NN\\models\\saved\\example_cnn\\example_cnn_20250624_213913.pt",
-      "created_at": "2025-06-24T21:39:13.563368",
-      "file_size_mb": 0.0797882080078125,
-      "performance_score": 85.85617724870231,
-      "accuracy": 0.3797766367576808,
-      "loss": 1.738881079808816,
-      "val_accuracy": 0.31375868989071576,
-      "val_loss": 1.758474336328537,
-      "reward": null,
-      "pnl": null,
-      "epoch": 2,
-      "training_time_hours": 0.2,
-      "total_parameters": 20163,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "example_cnn_20250624_213913",
-      "model_name": "example_cnn",
-      "model_type": "cnn",
-      "file_path": "NN\\models\\saved\\example_cnn\\example_cnn_20250624_213913.pt",
-      "created_at": "2025-06-24T21:39:13.566494",
-      "file_size_mb": 0.0797882080078125,
-      "performance_score": 96.86696983784515,
-      "accuracy": 0.41565501055141396,
-      "loss": 1.731468873500252,
-      "val_accuracy": 0.38848400580514414,
-      "val_loss": 1.8154629243104177,
-      "reward": null,
-      "pnl": null,
-      "epoch": 3,
-      "training_time_hours": 0.30000000000000004,
-      "total_parameters": 20163,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "example_cnn_20250624_213913",
-      "model_name": "example_cnn",
-      "model_type": "cnn",
-      "file_path": "NN\\models\\saved\\example_cnn\\example_cnn_20250624_213913.pt",
-      "created_at": "2025-06-24T21:39:13.569547",
-      "file_size_mb": 0.0797882080078125,
-      "performance_score": 106.29887197896815,
-      "accuracy": 0.4639872237832544,
-      "loss": 1.4731813440281318,
-      "val_accuracy": 0.4291565645756503,
-      "val_loss": 1.5423255128941882,
-      "reward": null,
-      "pnl": null,
-      "epoch": 4,
-      "training_time_hours": 0.4,
-      "total_parameters": 20163,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "example_cnn_20250624_213913",
-      "model_name": "example_cnn",
-      "model_type": "cnn",
-      "file_path": "NN\\models\\saved\\example_cnn\\example_cnn_20250624_213913.pt",
-      "created_at": "2025-06-24T21:39:13.575375",
-      "file_size_mb": 0.0797882080078125,
-      "performance_score": 115.87168812846218,
-      "accuracy": 0.5256293272461906,
-      "loss": 1.3264778472364203,
-      "val_accuracy": 0.46011511860837684,
-      "val_loss": 1.3762786097581432,
-      "reward": null,
-      "pnl": null,
-      "epoch": 5,
-      "training_time_hours": 0.5,
-      "total_parameters": 20163,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    }
-  ],
-  "example_manual": [
-    {
-      "checkpoint_id": "example_manual_20250624_213913",
-      "model_name": "example_manual",
-      "model_type": "cnn",
-      "file_path": "NN\\models\\saved\\example_manual\\example_manual_20250624_213913.pt",
-      "created_at": "2025-06-24T21:39:13.578488",
-      "file_size_mb": 0.0018634796142578125,
-      "performance_score": 186.07000000000002,
-      "accuracy": 0.85,
-      "loss": 0.45,
-      "val_accuracy": 0.82,
-      "val_loss": 0.48,
-      "reward": null,
-      "pnl": null,
-      "epoch": 25,
-      "training_time_hours": 2.5,
-      "total_parameters": 33,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    }
-  ],
-  "extrema_trainer": [
-    {
-      "checkpoint_id": "extrema_trainer_20250624_221645",
-      "model_name": "extrema_trainer",
-      "model_type": "extrema_trainer",
-      "file_path": "NN\\models\\saved\\extrema_trainer\\extrema_trainer_20250624_221645.pt",
-      "created_at": "2025-06-24T22:16:45.728299",
-      "file_size_mb": 0.0013427734375,
-      "performance_score": 0.1,
-      "accuracy": 0.0,
-      "loss": null,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "extrema_trainer_20250624_221915",
-      "model_name": "extrema_trainer",
-      "model_type": "extrema_trainer",
-      "file_path": "NN\\models\\saved\\extrema_trainer\\extrema_trainer_20250624_221915.pt",
-      "created_at": "2025-06-24T22:19:15.325368",
-      "file_size_mb": 0.0013427734375,
-      "performance_score": 0.1,
-      "accuracy": 0.0,
-      "loss": null,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "extrema_trainer_20250624_222303",
-      "model_name": "extrema_trainer",
-      "model_type": "extrema_trainer",
-      "file_path": "NN\\models\\saved\\extrema_trainer\\extrema_trainer_20250624_222303.pt",
-      "created_at": "2025-06-24T22:23:03.283194",
-      "file_size_mb": 0.0013427734375,
-      "performance_score": 0.1,
-      "accuracy": 0.0,
-      "loss": null,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "extrema_trainer_20250625_105812",
-      "model_name": "extrema_trainer",
-      "model_type": "extrema_trainer",
-      "file_path": "NN\\models\\saved\\extrema_trainer\\extrema_trainer_20250625_105812.pt",
-      "created_at": "2025-06-25T10:58:12.424290",
-      "file_size_mb": 0.0013427734375,
-      "performance_score": 0.1,
-      "accuracy": 0.0,
-      "loss": null,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "extrema_trainer_20250625_110836",
-      "model_name": "extrema_trainer",
-      "model_type": "extrema_trainer",
-      "file_path": "NN\\models\\saved\\extrema_trainer\\extrema_trainer_20250625_110836.pt",
-      "created_at": "2025-06-25T11:08:36.772996",
-      "file_size_mb": 0.0013427734375,
-      "performance_score": 0.1,
-      "accuracy": 0.0,
-      "loss": null,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    }
-  ],
-  "dqn_agent": [
-    {
-      "checkpoint_id": "dqn_agent_20250627_030115",
-      "model_name": "dqn_agent",
-      "model_type": "dqn",
-      "file_path": "models\\saved\\dqn_agent\\dqn_agent_20250627_030115.pt",
-      "created_at": "2025-06-27T03:01:15.021842",
-      "file_size_mb": 57.57266807556152,
-      "performance_score": 95.0,
-      "accuracy": 0.85,
-      "loss": 0.0145,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    }
-  ],
-  "enhanced_cnn": [
-    {
-      "checkpoint_id": "enhanced_cnn_20250627_030115",
-      "model_name": "enhanced_cnn",
-      "model_type": "cnn",
-      "file_path": "models\\saved\\enhanced_cnn\\enhanced_cnn_20250627_030115.pt",
-      "created_at": "2025-06-27T03:01:15.024856",
-      "file_size_mb": 0.7184391021728516,
-      "performance_score": 92.0,
-      "accuracy": 0.88,
-      "loss": 0.0187,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    }
-  ],
   "decision": [
     {
-      "checkpoint_id": "decision_20250702_083032",
+      "checkpoint_id": "decision_20250704_002853",
       "model_name": "decision",
       "model_type": "decision_fusion",
-      "file_path": "NN\\models\\saved\\decision\\decision_20250702_083032.pt",
-      "created_at": "2025-07-02T08:30:32.225869",
+      "file_path": "NN\\models\\saved\\decision\\decision_20250704_002853.pt",
+      "created_at": "2025-07-04T00:28:53.963706",
       "file_size_mb": 0.06720924377441406,
-      "performance_score": 102.79972716525019,
+      "performance_score": 102.79960449231176,
       "accuracy": null,
-      "loss": 2.7283549419721e-06,
+      "loss": 3.9550925251065546e-06,
       "val_accuracy": null,
       "val_loss": null,
       "reward": null,
@@ -291,15 +21,15 @@
       "wandb_artifact_name": null
     },
     {
-      "checkpoint_id": "decision_20250702_082925",
+      "checkpoint_id": "decision_20250704_002524",
       "model_name": "decision",
       "model_type": "decision_fusion",
-      "file_path": "NN\\models\\saved\\decision\\decision_20250702_082925.pt",
-      "created_at": "2025-07-02T08:29:25.899383",
+      "file_path": "NN\\models\\saved\\decision\\decision_20250704_002524.pt",
+      "created_at": "2025-07-04T00:25:24.871025",
       "file_size_mb": 0.06720924377441406,
-      "performance_score": 102.7997148991013,
+      "performance_score": 102.79956403304311,
       "accuracy": null,
-      "loss": 2.8510171153430164e-06,
+      "loss": 4.3596885756106274e-06,
       "val_accuracy": null,
       "val_loss": null,
       "reward": null,
@@ -311,15 +41,15 @@
       "wandb_artifact_name": null
     },
     {
-      "checkpoint_id": "decision_20250702_082924",
+      "checkpoint_id": "decision_20250704_002845",
       "model_name": "decision",
       "model_type": "decision_fusion",
-      "file_path": "NN\\models\\saved\\decision\\decision_20250702_082924.pt",
-      "created_at": "2025-07-02T08:29:24.538886",
+      "file_path": "NN\\models\\saved\\decision\\decision_20250704_002845.pt",
+      "created_at": "2025-07-04T00:28:45.968378",
       "file_size_mb": 0.06720924377441406,
-      "performance_score": 102.79971291710027,
+      "performance_score": 102.79955325642156,
       "accuracy": null,
-      "loss": 2.8708372390440218e-06,
+      "loss": 4.467455742483496e-06,
       "val_accuracy": null,
       "val_loss": null,
       "reward": null,
@@ -331,15 +61,15 @@
       "wandb_artifact_name": null
     },
     {
-      "checkpoint_id": "decision_20250702_082925",
+      "checkpoint_id": "decision_20250704_002527",
       "model_name": "decision",
       "model_type": "decision_fusion",
-      "file_path": "NN\\models\\saved\\decision\\decision_20250702_082925.pt",
-      "created_at": "2025-07-02T08:29:25.218718",
+      "file_path": "NN\\models\\saved\\decision\\decision_20250704_002527.pt",
+      "created_at": "2025-07-04T00:25:27.298202",
       "file_size_mb": 0.06720924377441406,
-      "performance_score": 102.79971274601752,
+      "performance_score": 102.79954341144796,
       "accuracy": null,
-      "loss": 2.87254807635711e-06,
+      "loss": 4.5659063679132875e-06,
       "val_accuracy": null,
       "val_loss": null,
       "reward": null,
@@ -351,117 +81,15 @@
       "wandb_artifact_name": null
     },
     {
-      "checkpoint_id": "decision_20250702_082925",
+      "checkpoint_id": "decision_20250704_021057",
       "model_name": "decision",
       "model_type": "decision_fusion",
-      "file_path": "NN\\models\\saved\\decision\\decision_20250702_082925.pt",
-      "created_at": "2025-07-02T08:29:25.332228",
+      "file_path": "NN\\models\\saved\\decision\\decision_20250704_021057.pt",
+      "created_at": "2025-07-04T02:10:57.474433",
       "file_size_mb": 0.06720924377441406,
-      "performance_score": 102.79971263447665,
+      "performance_score": 102.79953923346461,
       "accuracy": null,
-      "loss": 2.873663491419011e-06,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    }
-  ],
-  "cob_rl": [
-    {
-      "checkpoint_id": "cob_rl_20250702_004145",
-      "model_name": "cob_rl",
-      "model_type": "cob_rl",
-      "file_path": "NN\\models\\saved\\cob_rl\\cob_rl_20250702_004145.pt",
-      "created_at": "2025-07-02T00:41:45.481742",
-      "file_size_mb": 0.001003265380859375,
-      "performance_score": 9.644,
-      "accuracy": null,
-      "loss": 0.356,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "cob_rl_20250702_004315",
-      "model_name": "cob_rl",
-      "model_type": "cob_rl",
-      "file_path": "NN\\models\\saved\\cob_rl\\cob_rl_20250702_004315.pt",
-      "created_at": "2025-07-02T00:43:15.996943",
-      "file_size_mb": 0.001003265380859375,
-      "performance_score": 9.644,
-      "accuracy": null,
-      "loss": 0.356,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "cob_rl_20250702_004446",
-      "model_name": "cob_rl",
-      "model_type": "cob_rl",
-      "file_path": "NN\\models\\saved\\cob_rl\\cob_rl_20250702_004446.pt",
-      "created_at": "2025-07-02T00:44:46.656201",
-      "file_size_mb": 0.001003265380859375,
-      "performance_score": 9.644,
-      "accuracy": null,
-      "loss": 0.356,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "cob_rl_20250702_004617",
-      "model_name": "cob_rl",
-      "model_type": "cob_rl",
-      "file_path": "NN\\models\\saved\\cob_rl\\cob_rl_20250702_004617.pt",
-      "created_at": "2025-07-02T00:46:17.380509",
-      "file_size_mb": 0.001003265380859375,
-      "performance_score": 9.644,
-      "accuracy": null,
-      "loss": 0.356,
-      "val_accuracy": null,
-      "val_loss": null,
-      "reward": null,
-      "pnl": null,
-      "epoch": null,
-      "training_time_hours": null,
-      "total_parameters": null,
-      "wandb_run_id": null,
-      "wandb_artifact_name": null
-    },
-    {
-      "checkpoint_id": "cob_rl_20250702_004712",
-      "model_name": "cob_rl",
-      "model_type": "cob_rl",
-      "file_path": "NN\\models\\saved\\cob_rl\\cob_rl_20250702_004712.pt",
-      "created_at": "2025-07-02T00:47:12.447176",
-      "file_size_mb": 0.001003265380859375,
-      "performance_score": 9.644,
-      "accuracy": null,
-      "loss": 0.356,
+      "loss": 4.607686584533001e-06,
       "val_accuracy": null,
       "val_loss": null,
       "reward": null,
@@ -1549,38 +1549,28 @@ class TradingOrchestrator:
                 self.model_states['extrema_trainer']['current_loss'] = estimated_loss
                 self.model_states['extrema_trainer']['best_loss'] = estimated_loss

-            # Ensure initial_loss is set for new models
+            # NO LONGER SETTING SYNTHETIC INITIAL LOSS VALUES
+            # Keep all None values as None if no real data is available
+            # This prevents the "fake progress" issue where Current Loss = Initial Loss
+
+            # Only set initial_loss from actual training history if available
             for model_key, model_state in self.model_states.items():
                 if model_state['initial_loss'] is None:
-                    # Set reasonable initial loss values for new models
-                    initial_losses = {
-                        'dqn': 0.285,
-                        'cnn': 0.412,
-                        'cob_rl': 0.356,
-                        'decision': 0.298,
-                        'extrema_trainer': 0.356
-                    }
-                    model_state['initial_loss'] = initial_losses.get(model_key, 0.3)
-
-                # If current_loss is None, set it to initial_loss
-                if model_state['current_loss'] is None:
-                    model_state['current_loss'] = model_state['initial_loss']
-
-                # If best_loss is None, set it to current_loss
-                if model_state['best_loss'] is None:
-                    model_state['best_loss'] = model_state['current_loss']
+                    # Leave initial_loss as None if no real training history exists
+                    # Leave current_loss as None if model isn't actively training
+                    # Leave best_loss as None if no checkpoints exist with real performance data
+                    pass  # No synthetic data generation

             return self.model_states

         except Exception as e:
             logger.error(f"Error getting model states: {e}")
-            # Return safe fallback values
+            # Return None values instead of synthetic data
             return {
-                'dqn': {'initial_loss': 0.285, 'current_loss': 0.285, 'best_loss': 0.285, 'checkpoint_loaded': False},
-                'cnn': {'initial_loss': 0.412, 'current_loss': 0.412, 'best_loss': 0.412, 'checkpoint_loaded': False},
-                'cob_rl': {'initial_loss': 0.356, 'current_loss': 0.356, 'best_loss': 0.356, 'checkpoint_loaded': False},
-                'decision': {'initial_loss': 0.298, 'current_loss': 0.298, 'best_loss': 0.298, 'checkpoint_loaded': False},
-                'extrema_trainer': {'initial_loss': 0.356, 'current_loss': 0.356, 'best_loss': 0.356, 'checkpoint_loaded': False}
+                'dqn': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False},
+                'cnn': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False},
+                'cob_rl': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False},
+                'decision': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False},
+                'extrema_trainer': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False}
             }

     def update_model_loss(self, model_name: str, current_loss: float, best_loss: float = None):
@@ -59,7 +59,7 @@ class SignalAccumulator:
     confidence_sum: float = 0.0
     successful_predictions: int = 0
     total_predictions: int = 0
-    last_reset_time: datetime = None
+    last_reset_time: Optional[datetime] = None

     def __post_init__(self):
         if self.signals is None:
@@ -99,12 +99,13 @@ class RealtimeRLCOBTrader:
    """

    def __init__(self,
-                 symbols: List[str] = None,
-                 trading_executor: TradingExecutor = None,
+                 symbols: Optional[List[str]] = None,
+                 trading_executor: Optional[TradingExecutor] = None,
                  model_checkpoint_dir: str = "models/realtime_rl_cob",
                  inference_interval_ms: int = 200,
                  min_confidence_threshold: float = 0.35,  # Lowered from 0.7 for more aggressive trading
-                 required_confident_predictions: int = 3):
+                 required_confident_predictions: int = 3,
+                 checkpoint_manager: Any = None):

        self.symbols = symbols or ['BTC/USDT', 'ETH/USDT']
        self.trading_executor = trading_executor
@@ -113,6 +114,16 @@ class RealtimeRLCOBTrader:
        self.min_confidence_threshold = min_confidence_threshold
        self.required_confident_predictions = required_confident_predictions

+        # Initialize CheckpointManager (either provided or get global instance)
+        if checkpoint_manager is None:
+            from utils.checkpoint_manager import get_checkpoint_manager
+            self.checkpoint_manager = get_checkpoint_manager()
+        else:
+            self.checkpoint_manager = checkpoint_manager
+
+        # Track start time for training duration calculation
+        self.start_time = datetime.now()  # Initialize start_time
+
        # Setup device
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        logger.info(f"Using device: {self.device}")
@@ -819,29 +830,26 @@
                     actual_direction = 1  # SIDEWAYS

                 # Calculate reward based on prediction accuracy
-                reward = self._calculate_prediction_reward(
-                    prediction.predicted_direction,
-                    actual_direction,
-                    prediction.confidence,
-                    prediction.predicted_change,
-                    actual_change
+                prediction.reward = self._calculate_prediction_reward(
+                    symbol=symbol,
+                    predicted_direction=prediction.predicted_direction,
+                    actual_direction=actual_direction,
+                    confidence=prediction.confidence,
+                    predicted_change=prediction.predicted_change,
+                    actual_change=actual_change
                 )

                 # Update prediction
                 prediction.actual_direction = actual_direction
                 prediction.actual_change = actual_change
-                prediction.reward = reward

                 # Update training stats
                 stats = self.training_stats[symbol]
                 stats['total_predictions'] += 1
-                if reward > 0:
+                if prediction.reward > 0:
                     stats['successful_predictions'] += 1

         except Exception as e:
             logger.error(f"Error calculating rewards for {symbol}: {e}")

     def _calculate_prediction_reward(self,
+                                     symbol: str,
                                      predicted_direction: int,
                                      actual_direction: int,
                                      confidence: float,
@@ -849,67 +857,52 @@
                                      actual_change: float,
                                      current_pnl: float = 0.0,
                                      position_duration: float = 0.0) -> float:
-        """Calculate reward for a prediction with PnL-aware loss cutting optimization"""
-        try:
-            # Base reward for correct direction
-            if predicted_direction == actual_direction:
-                base_reward = 1.0
+        """Calculate reward based on prediction accuracy and actual price movement"""
+        reward = 0.0
+
+        # Base reward for correct direction prediction
+        if predicted_direction == actual_direction:
+            reward += 1.0 * confidence  # Reward scales with confidence
+        else:
+            reward -= 0.5  # Penalize incorrect predictions
+
+        # Reward for predicting large changes correctly (proportional to actual change)
+        if predicted_direction == actual_direction and abs(predicted_change) > 0.001:
+            reward += abs(actual_change) * 5.0  # Amplify reward for significant moves
+
+        # Penalize for large predicted changes that are wrong
+        if predicted_direction != actual_direction and abs(predicted_change) > 0.001:
+            reward -= abs(predicted_change) * 2.0
+
+        # Add reward for PnL (realized or unrealized)
+        reward += current_pnl * 0.1  # Small reward for PnL, adjusted by a factor
+
+        # Dynamic adjustment based on recent PnL (loss cutting incentive)
+        if self.pnl_history[symbol]:
+            latest_pnl_entry = self.pnl_history[symbol][-1]  # Get the latest PnL entry
+            # Ensure latest_pnl_entry is a dict and has 'pnl' key, otherwise default to 0.0
+            latest_pnl_value = latest_pnl_entry.get('pnl', 0.0) if isinstance(latest_pnl_entry, dict) else 0.0
+
+            # Incentivize closing losing trades early
+            if latest_pnl_value < 0 and position_duration > 60:  # If losing position open for > 60s
+                # More aggressively penalize holding losing positions, or reward closing them
+                reward -= (abs(latest_pnl_value) * 0.2)  # Increased penalty for sustained losses
+
+            # Discourage taking new positions if overall PnL is negative or volatile
+            # This requires a more complex calculation of overall PnL, potentially average of last N trades
+            # For simplicity, let's use the 'best_pnl' to decide if we are in a good state to trade
+
+            # Calculate the current best PnL from history, ensuring it's not empty
+            pnl_values = [entry.get('pnl', 0.0) for entry in self.pnl_history[symbol] if isinstance(entry, dict)]
+            if not pnl_values:
+                best_pnl = 0.0
             else:
-                base_reward = -1.0
+                best_pnl = max(pnl_values)

-            # Scale by confidence
-            confidence_scaled_reward = base_reward * confidence
+            if best_pnl < 0.0:  # If recent best PnL is negative, reduce reward for new trades
+                reward -= 0.1  # Small penalty for trading in a losing streak

-            # Additional reward for magnitude accuracy
-            if predicted_direction != 1:  # Not sideways
-                magnitude_accuracy = 1.0 - abs(predicted_change - actual_change) / max(abs(actual_change), 0.001)
-                magnitude_accuracy = max(0.0, magnitude_accuracy)
-                confidence_scaled_reward += magnitude_accuracy * 0.5
-
-            # Penalty for overconfident wrong predictions
-            if base_reward < 0 and confidence > 0.8:
-                confidence_scaled_reward *= 1.5  # Increase penalty
-
-            # === PnL-AWARE LOSS CUTTING REWARDS ===
-
-            pnl_reward = 0.0
-
-            # Reward cutting losses early (SIDEWAYS when losing)
-            if current_pnl < -10.0:  # In significant loss
-                if predicted_direction == 1:  # SIDEWAYS (exit signal)
-                    # Reward cutting losses before they get worse
-                    loss_cutting_bonus = min(1.0, abs(current_pnl) / 100.0) * confidence
-                    pnl_reward += loss_cutting_bonus
-                elif predicted_direction != 1:  # Continuing to trade while in loss
-                    # Penalty for not cutting losses
-                    pnl_reward -= 0.5 * confidence
-
-            # Reward protecting profits (SIDEWAYS when in profit and market turning)
-            elif current_pnl > 10.0:  # In profit
-                if predicted_direction == 1 and base_reward > 0:  # Correct SIDEWAYS prediction
-                    # Reward protecting profits from reversal
-                    profit_protection_bonus = min(0.5, current_pnl / 200.0) * confidence
-                    pnl_reward += profit_protection_bonus
-
-            # Duration penalty for holding losing positions
-            if current_pnl < 0 and position_duration > 3600:  # Losing for > 1 hour
-                duration_penalty = min(1.0, position_duration / 7200.0) * 0.3  # Up to 30% penalty
-                confidence_scaled_reward -= duration_penalty
-
-            # Severe penalty for letting small losses become big losses
-            if current_pnl < -50.0:  # Large loss
-                drawdown_penalty = min(2.0, abs(current_pnl) / 100.0) * confidence
-                confidence_scaled_reward -= drawdown_penalty
-
-            # Total reward
-            total_reward = confidence_scaled_reward + pnl_reward
-
-            # Clamp final reward
-            return max(-5.0, min(5.0, float(total_reward)))
-
-        except Exception as e:
-            logger.error(f"Error calculating reward: {e}")
-            return 0.0
+        return reward
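To make the new additive reward concrete, here is a worked example for a correct directional call at confidence 0.8, a 0.3% realized move, and $5 of open PnL, assuming an empty pnl_history so none of the streak adjustments fire (all inputs are invented):

confidence = 0.8
predicted_change = 0.002   # +0.2% predicted (above the 0.001 significance threshold)
actual_change = 0.003      # +0.3% realized, same direction
current_pnl = 5.0          # $5 unrealized

reward = 0.0
reward += 1.0 * confidence                 # correct direction: +0.8
if abs(predicted_change) > 0.001:
    reward += abs(actual_change) * 5.0     # significant move called correctly: +0.015
reward += current_pnl * 0.1                # PnL term: +0.5
print(round(reward, 3))                    # 1.315

Note the new version also drops the old [-5, 5] clamp, so large PnL swings now pass through to the reward unbounded.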
     async def _train_batch(self, symbol: str, predictions: List[PredictionResult]) -> float:
         """Train model on a batch of predictions"""
@@ -1021,20 +1014,36 @@
                 await asyncio.sleep(60)

     def _save_models(self):
-        """Save all models to disk"""
+        """Save all models to disk using CheckpointManager"""
         try:
             for symbol in self.symbols:
-                symbol_safe = symbol.replace('/', '_')
-                model_path = os.path.join(self.model_checkpoint_dir, f"{symbol_safe}_model.pt")
+                model_name = f"cob_rl_{symbol.replace('/', '_').lower()}"  # Standardize model name for CheckpointManager

-                # Save model state
-                torch.save({
-                    'model_state_dict': self.models[symbol].state_dict(),
-                    'optimizer_state_dict': self.optimizers[symbol].state_dict(),
-                    'training_stats': self.training_stats[symbol],
-                    'inference_stats': self.inference_stats[symbol],
-                    'timestamp': datetime.now().isoformat()
-                }, model_path)
+                # Prepare performance metrics for CheckpointManager
+                performance_metrics = {
+                    'loss': self.training_stats[symbol].get('average_loss', 0.0),
+                    'reward': self.training_stats[symbol].get('average_reward', 0.0),  # Assuming average_reward is tracked
+                    'accuracy': self.training_stats[symbol].get('average_accuracy', 0.0),  # Assuming average_accuracy is tracked
+                }
+                if self.trading_executor:  # Add check for trading_executor
+                    daily_stats = self.trading_executor.get_daily_stats()
+                    performance_metrics['pnl'] = daily_stats.get('total_pnl', 0.0)  # Example, get actual pnl
+                performance_metrics['training_samples'] = self.training_stats[symbol].get('total_training_steps', 0)
+
+                # Prepare training metadata for CheckpointManager
+                training_metadata = {
+                    'total_parameters': sum(p.numel() for p in self.models[symbol].parameters()),
+                    'epoch': self.training_stats[symbol].get('total_training_steps', 0),  # Using total_training_steps as pseudo-epoch
+                    'training_time_hours': (datetime.now() - self.start_time).total_seconds() / 3600
+                }
+
+                self.checkpoint_manager.save_checkpoint(
+                    model=self.models[symbol],
+                    model_name=model_name,
+                    model_type='COB_RL',  # Specify model type
+                    performance_metrics=performance_metrics,
+                    training_metadata=training_metadata
+                )

                 logger.debug(f"Saved model for {symbol}")
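The save and load paths below now have to agree on one checkpoint name per symbol. A quick sketch of the naming convention both sides derive from the trading symbol (symbols invented):

# Both _save_models and _load_models derive the CheckpointManager name this way.
for symbol in ['BTC/USDT', 'ETH/USDT']:
    print(f"cob_rl_{symbol.replace('/', '_').lower()}")
# cob_rl_btc_usdt
# cob_rl_eth_usdt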
@@ -1042,13 +1051,15 @@
                 logger.error(f"Error saving models: {e}")

     def _load_models(self):
-        """Load existing models from disk"""
+        """Load existing models from disk using CheckpointManager"""
         try:
             for symbol in self.symbols:
-                symbol_safe = symbol.replace('/', '_')
-                model_path = os.path.join(self.model_checkpoint_dir, f"{symbol_safe}_model.pt")
+                model_name = f"cob_rl_{symbol.replace('/', '_').lower()}"  # Standardize model name for CheckpointManager

-                if os.path.exists(model_path):
+                loaded_checkpoint = self.checkpoint_manager.load_best_checkpoint(model_name)
+
+                if loaded_checkpoint:
+                    model_path, metadata = loaded_checkpoint
                     checkpoint = torch.load(model_path, map_location=self.device)

                     self.models[symbol].load_state_dict(checkpoint['model_state_dict'])
@@ -1059,9 +1070,9 @@
                     if 'inference_stats' in checkpoint:
                         self.inference_stats[symbol].update(checkpoint['inference_stats'])

-                    logger.info(f"Loaded existing model for {symbol}")
+                    logger.info(f"Loaded existing model for {symbol} from checkpoint: {metadata.checkpoint_id}")
                 else:
-                    logger.info(f"No existing model found for {symbol}, starting fresh")
+                    logger.info(f"No existing model found for {symbol} via CheckpointManager, starting fresh.")

         except Exception as e:
             logger.error(f"Error loading models: {e}")
@@ -1111,7 +1122,7 @@ async def main():
     from ..core.trading_executor import TradingExecutor

     # Initialize trading executor (simulation mode)
-    trading_executor = TradingExecutor(simulation_mode=True)
+    trading_executor = TradingExecutor()

     # Initialize real-time RL trader
     trader = RealtimeRLCOBTrader(
@@ -93,7 +93,6 @@ class TradingExecutor:
                 api_key=api_key,
                 api_secret=api_secret,
                 test_mode=exchange_test_mode,
-                trading_mode=trading_mode
             )

         # Trading state
@@ -213,9 +212,15 @@
         # Determine the quote asset (e.g., USDT, USDC) from the symbol
         if '/' in symbol:
             quote_asset = symbol.split('/')[1].upper()  # Assuming symbol is like ETH/USDT
+            # Convert USDT to USDC for MEXC spot trading
+            if quote_asset == 'USDT':
+                quote_asset = 'USDC'
         else:
             # Fallback for symbols like ETHUSDT (assuming last 4 chars are quote)
             quote_asset = symbol[-4:].upper()
+            # Convert USDT to USDC for MEXC spot trading
+            if quote_asset == 'USDT':
+                quote_asset = 'USDC'

         # Calculate required capital for the trade
         # If we are selling (to open a short position), we need collateral based on the position size
@@ -779,13 +784,14 @@
         logger.info("Daily trading statistics reset")

     def get_account_balance(self) -> Dict[str, Dict[str, float]]:
-        """Get account balance information from MEXC
+        """Get account balance information from MEXC, including spot and futures.

         Returns:
             Dict with asset balances in format:
             {
-                'USDT': {'free': 100.0, 'locked': 0.0},
-                'ETH': {'free': 0.5, 'locked': 0.0},
+                'USDT': {'free': 100.0, 'locked': 0.0, 'total': 100.0, 'type': 'spot'},
+                'ETH': {'free': 0.5, 'locked': 0.0, 'total': 0.5, 'type': 'spot'},
+                'FUTURES_USDT': {'free': 500.0, 'locked': 50.0, 'total': 550.0, 'type': 'futures'}
                 ...
             }
         """
@@ -794,28 +800,47 @@
                 logger.error("Exchange interface not available")
                 return {}

-            # Get account info from MEXC
-            account_info = self.exchange.get_account_info()
-            if not account_info:
-                logger.error("Failed to get account info from MEXC")
-                return {}
+            combined_balances = {}

-            balances = {}
-            for balance in account_info.get('balances', []):
-                asset = balance.get('asset', '')
-                free = float(balance.get('free', 0))
-                locked = float(balance.get('locked', 0))
+            # 1. Get Spot Account Info
+            spot_account_info = self.exchange.get_account_info()
+            if spot_account_info and 'balances' in spot_account_info:
+                for balance in spot_account_info['balances']:
+                    asset = balance.get('asset', '')
+                    free = float(balance.get('free', 0))
+                    locked = float(balance.get('locked', 0))
+                    if free > 0 or locked > 0:
+                        combined_balances[asset] = {
+                            'free': free,
+                            'locked': locked,
+                            'total': free + locked,
+                            'type': 'spot'
+                        }
+            else:
+                logger.warning("Failed to get spot account info from MEXC or no balances found.")

-                # Only include assets with non-zero balance
-                if free > 0 or locked > 0:
-                    balances[asset] = {
-                        'free': free,
-                        'locked': locked,
-                        'total': free + locked
-                    }
+            # 2. Get Futures Account Info (commented out until futures API is implemented)
+            # futures_account_info = self.exchange.get_futures_account_info()
+            # if futures_account_info:
+            #     for currency, asset_data in futures_account_info.items():
+            #         # MEXC Futures API returns 'availableBalance' and 'frozenBalance'
+            #         free = float(asset_data.get('availableBalance', 0))
+            #         locked = float(asset_data.get('frozenBalance', 0))
+            #         total = free + locked  # total is the sum of available and frozen
+            #         if free > 0 or locked > 0:
+            #             # Prefix with 'FUTURES_' to distinguish from spot, or decide on a unified key
+            #             # For now, let's keep them distinct for clarity
+            #             combined_balances[f'FUTURES_{currency}'] = {
+            #                 'free': free,
+            #                 'locked': locked,
+            #                 'total': total,
+            #                 'type': 'futures'
+            #             }
+            # else:
+            #     logger.warning("Failed to get futures account info from MEXC or no futures assets found.")

-            logger.info(f"Retrieved balances for {len(balances)} assets")
-            return balances
+            logger.info(f"Retrieved combined balances for {len(combined_balances)} assets.")
+            return combined_balances

         except Exception as e:
             logger.error(f"Error getting account balance: {e}")
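A short sketch of consuming the new get_account_balance() shape, with invented values; the added 'type' field is what lets callers separate spot entries from the future FUTURES_* ones:

# Hypothetical return value from get_account_balance() (values invented).
balances = {
    'USDC': {'free': 100.0, 'locked': 0.0, 'total': 100.0, 'type': 'spot'},
    'ETH': {'free': 0.5, 'locked': 0.1, 'total': 0.6, 'type': 'spot'},
}
spot_locked = sum(b['locked'] for b in balances.values() if b['type'] == 'spot')
print(spot_locked)  # 0.1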
@@ -133,19 +133,37 @@ class CheckpointManager:

     def load_best_checkpoint(self, model_name: str) -> Optional[Tuple[str, CheckpointMetadata]]:
         try:
-            if model_name not in self.checkpoints or not self.checkpoints[model_name]:
-                logger.warning(f"No checkpoints found for model: {model_name}")
-                return None
+            # First, try the standard checkpoint system
+            if model_name in self.checkpoints and self.checkpoints[model_name]:
+                # Filter out checkpoints with non-existent files
+                valid_checkpoints = [
+                    cp for cp in self.checkpoints[model_name]
+                    if Path(cp.file_path).exists()
+                ]

-            best_checkpoint = max(self.checkpoints[model_name], key=lambda x: x.performance_score)
+                if valid_checkpoints:
+                    best_checkpoint = max(valid_checkpoints, key=lambda x: x.performance_score)
+                    logger.debug(f"Loading best checkpoint for {model_name}: {best_checkpoint.checkpoint_id}")
+                    return best_checkpoint.file_path, best_checkpoint
+                else:
+                    # Clean up invalid metadata entries
+                    invalid_count = len(self.checkpoints[model_name])
+                    logger.warning(f"Found {invalid_count} invalid checkpoint entries for {model_name}, cleaning up metadata")
+                    self.checkpoints[model_name] = []
+                    self._save_metadata()

-            if not Path(best_checkpoint.file_path).exists():
-                # temporary disable logging to avoid spam
-                # logger.error(f"Best checkpoint file not found: {best_checkpoint.file_path}")
-                return None
+            # Fallback: Look for existing saved models in the legacy format
+            logger.debug(f"No valid checkpoints found for model: {model_name}, attempting to find legacy saved models")
+            legacy_model_path = self._find_legacy_model(model_name)

-            logger.debug(f"Loading best checkpoint for {model_name}: {best_checkpoint.checkpoint_id}")
-            return best_checkpoint.file_path, best_checkpoint
+            if legacy_model_path:
+                # Create checkpoint metadata for the legacy model using actual file data
+                legacy_metadata = self._create_legacy_metadata(model_name, legacy_model_path)
+                logger.debug(f"Found legacy model for {model_name}: {legacy_model_path}")
+                return str(legacy_model_path), legacy_metadata
+
+            logger.warning(f"No checkpoints or legacy models found for: {model_name}")
+            return None

         except Exception as e:
             logger.error(f"Error loading best checkpoint for {model_name}: {e}")
@@ -181,16 +199,8 @@ class CheckpointManager:
             # Bonus for processing more training samples
             score += min(10, metrics['training_samples'] / 10)

-            # Ensure minimum score for any training activity
-            if score == 0.0 and metrics:
-                # Use the first available metric with better scaling
-                first_metric = next(iter(metrics.values()))
-                if first_metric > 0:
-                    score = max(0.1, min(10, first_metric))
-                else:
-                    score = 0.1
-
-            return max(score, 0.1)
+            # Return actual calculated score - NO SYNTHETIC MINIMUM
+            return score

     def _should_save_checkpoint(self, model_name: str, performance_score: float) -> bool:
         """Improved checkpoint saving logic with more frequent saves during training"""
@@ -332,6 +342,110 @@ class CheckpointManager:

         return stats

+    def _find_legacy_model(self, model_name: str) -> Optional[Path]:
+        """Find legacy saved models based on model name patterns"""
+        base_dir = Path(self.base_dir)
+
+        # Define model name mappings and patterns for legacy files
+        legacy_patterns = {
+            'dqn_agent': [
+                'dqn_agent_best_policy.pt',
+                'enhanced_dqn_best_policy.pt',
+                'improved_dqn_agent_best_policy.pt',
+                'dqn_agent_final_policy.pt'
+            ],
+            'enhanced_cnn': [
+                'cnn_model_best.pt',
+                'optimized_short_term_model_best.pt',
+                'optimized_short_term_model_realtime_best.pt',
+                'optimized_short_term_model_ticks_best.pt'
+            ],
+            'extrema_trainer': [
+                'supervised_model_best.pt'
+            ],
+            'cob_rl': [
+                'best_rl_model.pth_policy.pt',
+                'rl_agent_best_policy.pt'
+            ],
+            'decision': [
+                # Decision models might be in subdirectories, but let's check main dir too
+                'decision_best.pt',
+                'decision_model_best.pt',
+                # Check for transformer models which might be used as decision models
+                'enhanced_dqn_best_policy.pt',
+                'improved_dqn_agent_best_policy.pt'
+            ]
+        }
+
+        # Get patterns for this model name
+        patterns = legacy_patterns.get(model_name, [])
+
+        # Also try generic patterns based on model name
+        patterns.extend([
+            f'{model_name}_best.pt',
+            f'{model_name}_best_policy.pt',
+            f'{model_name}_final.pt',
+            f'{model_name}_final_policy.pt'
+        ])
+
+        # Search for the model files
+        for pattern in patterns:
+            candidate_path = base_dir / pattern
+            if candidate_path.exists():
+                logger.debug(f"Found legacy model file: {candidate_path}")
+                return candidate_path
+
+        # Also check subdirectories
+        for subdir in base_dir.iterdir():
+            if subdir.is_dir() and subdir.name == model_name:
+                for pattern in patterns:
+                    candidate_path = subdir / pattern
+                    if candidate_path.exists():
+                        logger.debug(f"Found legacy model file in subdirectory: {candidate_path}")
+                        return candidate_path
+
+        return None
+
+    def _create_legacy_metadata(self, model_name: str, file_path: Path) -> CheckpointMetadata:
+        """Create metadata for legacy model files using only actual file information"""
+        try:
+            file_size_mb = file_path.stat().st_size / (1024 * 1024)
+            created_time = datetime.fromtimestamp(file_path.stat().st_mtime)
+
+            # NO SYNTHETIC DATA - use only actual file information
+            return CheckpointMetadata(
+                checkpoint_id=f"legacy_{model_name}_{int(created_time.timestamp())}",
+                model_name=model_name,
+                model_type=model_name,
+                file_path=str(file_path),
+                created_at=created_time,
+                file_size_mb=file_size_mb,
+                performance_score=0.0,  # Unknown performance - use 0, not synthetic values
+                accuracy=None,
+                loss=None,
+                val_accuracy=None,
+                val_loss=None,
+                reward=None,
+                pnl=None,
+                epoch=None,
+                training_time_hours=None,
+                total_parameters=None,
+                wandb_run_id=None,
+                wandb_artifact_name=None
+            )
+        except Exception as e:
+            logger.error(f"Error creating legacy metadata for {model_name}: {e}")
+            # Return a basic metadata with minimal info - NO SYNTHETIC VALUES
+            return CheckpointMetadata(
+                checkpoint_id=f"legacy_{model_name}",
+                model_name=model_name,
+                model_type=model_name,
+                file_path=str(file_path),
+                created_at=datetime.now(),
+                file_size_mb=0.0,
+                performance_score=0.0  # Unknown - use 0, not synthetic
+            )

 _checkpoint_manager = None

 def get_checkpoint_manager() -> CheckpointManager:
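Expected call pattern against the new fallback-aware loader, as a sketch (the import path matches the one RealtimeRLCOBTrader uses above; the model name is illustrative):

from utils.checkpoint_manager import get_checkpoint_manager

cm = get_checkpoint_manager()
result = cm.load_best_checkpoint('dqn_agent')
if result:
    path, meta = result
    # Legacy files surface with performance_score == 0.0 and None metrics,
    # so callers must not assume real training stats are present.
    print(path, meta.checkpoint_id, meta.performance_score)
else:
    print('no checkpoint or legacy model found')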
@@ -1934,11 +1934,13 @@ class CleanTradingDashboard:

             # Fallback if orchestrator not available or returns None
             if model_states is None:
+                # FIXED: No longer using hardcoded placeholder loss values
+                # Dashboard should show "No Data" or actual training status instead
                 model_states = {
-                    'dqn': {'initial_loss': 0.2850, 'current_loss': 0.0145, 'best_loss': 0.0098, 'checkpoint_loaded': False},
-                    'cnn': {'initial_loss': 0.4120, 'current_loss': 0.0187, 'best_loss': 0.0134, 'checkpoint_loaded': False},
-                    'cob_rl': {'initial_loss': 0.3560, 'current_loss': 0.0098, 'best_loss': 0.0076, 'checkpoint_loaded': False},
-                    'decision': {'initial_loss': 0.2980, 'current_loss': 0.0089, 'best_loss': 0.0065, 'checkpoint_loaded': False}
+                    'dqn': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False},
+                    'cnn': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False},
+                    'cob_rl': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False},
+                    'decision': {'initial_loss': None, 'current_loss': None, 'best_loss': None, 'checkpoint_loaded': False}
                 }

             # Get latest predictions from all models
@@ -1956,6 +1958,13 @@
                 except (TypeError, ZeroDivisionError):
                     return default_improvement

+            # Helper function to format loss values
+            def format_loss_value(loss_value: Optional[float]) -> str:
+                """Format loss value for display, showing 'No Data' for None values"""
+                if loss_value is None:
+                    return "No Data"
+                return f"{loss_value:.4f}"
+
             # Helper function to get timing information
             def get_model_timing_info(model_name: str) -> Dict[str, Any]:
                 timing = {
@@ -2041,12 +2050,12 @@
                 },
                 # FIXED: Get REAL loss values from orchestrator model, not placeholders
                 'loss_5ma': self._get_real_model_loss('dqn'),
-                'initial_loss': dqn_state.get('initial_loss', 0.2850),
+                'initial_loss': dqn_state.get('initial_loss'),  # No fallback - show None if unknown
                 'best_loss': self._get_real_best_loss('dqn'),
                 'improvement': safe_improvement_calc(
-                    dqn_state.get('initial_loss', 0.2850),
+                    dqn_state.get('initial_loss'),
                     self._get_real_model_loss('dqn'),
-                    0.0 if not dqn_active else 94.9  # Default if no real improvement available
+                    0.0  # No synthetic default improvement
                 ),
                 'checkpoint_loaded': dqn_checkpoint_loaded,
                 'model_type': 'DQN',
@@ -2109,13 +2118,13 @@
                     'predicted_price': cnn_predicted_price,
                     'type': cnn_latest.get('type', 'cnn_pivot') if cnn_latest else 'cnn_pivot'
                 },
-                'loss_5ma': cnn_state.get('current_loss', 0.0187),
-                'initial_loss': cnn_state.get('initial_loss', 0.4120),
-                'best_loss': cnn_state.get('best_loss', 0.0134),
+                'loss_5ma': cnn_state.get('current_loss'),
+                'initial_loss': cnn_state.get('initial_loss'),
+                'best_loss': cnn_state.get('best_loss'),
                 'improvement': safe_improvement_calc(
-                    cnn_state.get('initial_loss', 0.4120),
-                    cnn_state.get('current_loss', 0.0187),
-                    95.5  # Default improvement percentage
+                    cnn_state.get('initial_loss'),
+                    cnn_state.get('current_loss'),
+                    0.0  # No synthetic default improvement
                 ),
                 'checkpoint_loaded': cnn_state.get('checkpoint_loaded', False),
                 'model_type': 'CNN',
@@ -3948,11 +3957,11 @@
         except Exception:
             return default

-    def _get_real_model_loss(self, model_name: str) -> float:
+    def _get_real_model_loss(self, model_name: str) -> Optional[float]:
         """Get REAL current loss from the actual model, not placeholders"""
         try:
             if not self.orchestrator:
-                return 0.2850  # Default fallback
+                return None  # No orchestrator = no real data

             if model_name == 'dqn' and hasattr(self.orchestrator, 'rl_agent') and self.orchestrator.rl_agent:
                 # Get real loss from DQN agent
@@ -3961,8 +3970,8 @@
                     # Average of last 50 losses for current loss
                     recent_losses = agent.losses[-50:]
                     return sum(recent_losses) / len(recent_losses)
-                elif hasattr(agent, 'current_loss'):
-                    return float(getattr(agent, 'current_loss', 0.2850))
+                elif hasattr(agent, 'current_loss') and agent.current_loss is not None:
+                    return float(agent.current_loss)

             elif model_name == 'cnn' and hasattr(self.orchestrator, 'cnn_model') and self.orchestrator.cnn_model:
                 # Get real loss from CNN model
@@ -3970,8 +3979,8 @@
                 if hasattr(model, 'training_losses') and len(getattr(model, 'training_losses', [])) > 0:
                     recent_losses = getattr(model, 'training_losses', [])[-50:]
                     return sum(recent_losses) / len(recent_losses)
-                elif hasattr(model, 'current_loss'):
-                    return float(getattr(model, 'current_loss', 0.2850))
+                elif hasattr(model, 'current_loss') and model.current_loss is not None:
+                    return float(model.current_loss)

             elif model_name == 'decision' and hasattr(self.orchestrator, 'decision_fusion_network'):
                 # Get real loss from decision fusion
@@ -3983,45 +3992,45 @@
             # Fallback to model states
             model_states = self.orchestrator.get_model_states() if hasattr(self.orchestrator, 'get_model_states') else {}
             state = model_states.get(model_name, {})
-            return state.get('current_loss', 0.2850)
+            return state.get('current_loss')  # Return None if no real data

         except Exception as e:
             logger.debug(f"Error getting real loss for {model_name}: {e}")
-            return 0.2850  # Safe fallback
+            return None  # Return None instead of synthetic data

-    def _get_real_best_loss(self, model_name: str) -> float:
+    def _get_real_best_loss(self, model_name: str) -> Optional[float]:
         """Get REAL best loss from the actual model"""
         try:
             if not self.orchestrator:
-                return 0.0145  # Default fallback
+                return None  # No orchestrator = no real data

             if model_name == 'dqn' and hasattr(self.orchestrator, 'rl_agent') and self.orchestrator.rl_agent:
                 agent = self.orchestrator.rl_agent
-                if hasattr(agent, 'best_loss'):
-                    return float(getattr(agent, 'best_loss', 0.0145))
+                if hasattr(agent, 'best_loss') and agent.best_loss is not None:
+                    return float(agent.best_loss)
                 elif hasattr(agent, 'losses') and len(agent.losses) > 0:
                     return min(agent.losses)

             elif model_name == 'cnn' and hasattr(self.orchestrator, 'cnn_model') and self.orchestrator.cnn_model:
                 model = self.orchestrator.cnn_model
-                if hasattr(model, 'best_loss'):
-                    return float(getattr(model, 'best_loss', 0.0145))
+                if hasattr(model, 'best_loss') and model.best_loss is not None:
+                    return float(model.best_loss)
                 elif hasattr(model, 'training_losses') and len(getattr(model, 'training_losses', [])) > 0:
-                    return min(getattr(model, 'training_losses', [0.0145]))
+                    return min(getattr(model, 'training_losses', []))

             elif model_name == 'decision' and hasattr(self.orchestrator, 'fusion_training_data'):
                 if len(self.orchestrator.fusion_training_data) > 0:
                     all_losses = [entry['loss'] for entry in self.orchestrator.fusion_training_data]
-                    return min(all_losses) if all_losses else 0.0065
+                    return min(all_losses) if all_losses else None

             # Fallback to model states
             model_states = self.orchestrator.get_model_states() if hasattr(self.orchestrator, 'get_model_states') else {}
             state = model_states.get(model_name, {})
-            return state.get('best_loss', 0.0145)
+            return state.get('best_loss')  # Return None if no real data

         except Exception as e:
             logger.debug(f"Error getting best loss for {model_name}: {e}")
-            return 0.0145  # Safe fallback
+            return None  # Return None instead of synthetic data

     def _clear_old_signals_for_tick_range(self):
         """Clear old signals that are outside the current tick cache time range - VERY CONSERVATIVE"""