gogo2/model_parameter_audit_report.json
{
"timestamp": "N/A",
"pytorch_version": "2.6.0+cu118",
"cuda_available": true,
"device_info": {
"cuda_device_count": 1,
"current_device": "0"
},
"model_architectures": {
"enhanced_cnn": [
{
"model_name": "EnhancedCNN_Optimized",
"input_shape": [
5,
100
],
"total_parameters": 168296366,
"trainable_parameters": 168296366,
"size_mb": 642.2225341796875,
"layer_breakdown": [
{
"layer_name": "conv_layers.0",
"layer_type": "Conv1d",
"parameters": 9216,
"trainable": 9216
},
{
"layer_name": "conv_layers.1",
"layer_type": "BatchNorm1d",
"parameters": 512,
"trainable": 512
},
{
"layer_name": "conv_layers.4.bn1",
"layer_type": "BatchNorm1d",
"parameters": 512,
"trainable": 512
},
{
"layer_name": "conv_layers.4.conv1",
"layer_type": "Conv1d",
"parameters": 393216,
"trainable": 393216
},
{
"layer_name": "conv_layers.4.bn2",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.4.conv2",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.4.shortcut.0",
"layer_type": "Conv1d",
"parameters": 131072,
"trainable": 131072
},
{
"layer_name": "conv_layers.5.bn1",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.5.conv1",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.5.bn2",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.5.conv2",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.6.bn1",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.6.conv1",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.6.bn2",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.6.conv2",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.9.bn1",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.9.conv1",
"layer_type": "Conv1d",
"parameters": 1572864,
"trainable": 1572864
},
{
"layer_name": "conv_layers.9.bn2",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.9.conv2",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.9.shortcut.0",
"layer_type": "Conv1d",
"parameters": 524288,
"trainable": 524288
},
{
"layer_name": "conv_layers.10.bn1",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.10.conv1",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.10.bn2",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.10.conv2",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.11.bn1",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.11.conv1",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.11.bn2",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.11.conv2",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.14.bn1",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.14.conv1",
"layer_type": "Conv1d",
"parameters": 4718592,
"trainable": 4718592
},
{
"layer_name": "conv_layers.14.bn2",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.14.conv2",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.14.shortcut.0",
"layer_type": "Conv1d",
"parameters": 1572864,
"trainable": 1572864
},
{
"layer_name": "conv_layers.15.bn1",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.15.conv1",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.15.bn2",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.15.conv2",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.16.bn1",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.16.conv1",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.16.bn2",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.16.conv2",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.19.bn1",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.19.conv1",
"layer_type": "Conv1d",
"parameters": 9437184,
"trainable": 9437184
},
{
"layer_name": "conv_layers.19.bn2",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.19.conv2",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.19.shortcut.0",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.20.bn1",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.20.conv1",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.20.bn2",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.20.conv2",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.21.bn1",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.21.conv1",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.21.bn2",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.21.conv2",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "fc1",
"layer_type": "Linear",
"parameters": 4196352,
"trainable": 4196352
},
{
"layer_name": "fc_layers.3",
"layer_type": "Linear",
"parameters": 4196352,
"trainable": 4196352
},
{
"layer_name": "fc_layers.6",
"layer_type": "Linear",
"parameters": 3147264,
"trainable": 3147264
},
{
"layer_name": "fc_layers.9",
"layer_type": "Linear",
"parameters": 1573888,
"trainable": 1573888
},
{
"layer_name": "fc_layers.12",
"layer_type": "Linear",
"parameters": 787200,
"trainable": 787200
},
{
"layer_name": "price_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "price_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "price_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volume_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volume_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volume_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "trend_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "trend_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "trend_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volatility_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volatility_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volatility_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "attention_fusion.0",
"layer_type": "Linear",
"parameters": 3146752,
"trainable": 3146752
},
{
"layer_name": "attention_fusion.3",
"layer_type": "Linear",
"parameters": 787200,
"trainable": 787200
},
{
"layer_name": "advantage_stream.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "advantage_stream.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "advantage_stream.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "advantage_stream.8",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "value_stream.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "value_stream.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "value_stream.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "value_stream.8",
"layer_type": "Linear",
"parameters": 129,
"trainable": 129
},
{
"layer_name": "extrema_head.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "extrema_head.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "extrema_head.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "extrema_head.8",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_immediate.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "price_pred_immediate.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_immediate.5",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_midterm.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "price_pred_midterm.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_midterm.5",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_longterm.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "price_pred_longterm.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_longterm.5",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_value.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "price_pred_value.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "price_pred_value.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_value.8",
"layer_type": "Linear",
"parameters": 1032,
"trainable": 1032
},
{
"layer_name": "volatility_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "volatility_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "volatility_head.5",
"layer_type": "Linear",
"parameters": 645,
"trainable": 645
},
{
"layer_name": "support_resistance_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "support_resistance_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "support_resistance_head.5",
"layer_type": "Linear",
"parameters": 774,
"trainable": 774
},
{
"layer_name": "market_regime_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "market_regime_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "market_regime_head.5",
"layer_type": "Linear",
"parameters": 903,
"trainable": 903
},
{
"layer_name": "risk_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "risk_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "risk_head.5",
"layer_type": "Linear",
"parameters": 516,
"trainable": 516
}
]
}
],
"dqn_agent": [
{
"model_name": "DQNAgent_EnhancedCNN",
"state_shape": [
5,
100
],
"policy_parameters": 168296366,
"target_parameters": 168296366,
"total_parameters": 336592732,
"size_mb": 1284.445068359375,
"layer_breakdown": [
{
"layer_name": "conv_layers.0",
"layer_type": "Conv1d",
"parameters": 9216,
"trainable": 9216
},
{
"layer_name": "conv_layers.1",
"layer_type": "BatchNorm1d",
"parameters": 512,
"trainable": 512
},
{
"layer_name": "conv_layers.4.bn1",
"layer_type": "BatchNorm1d",
"parameters": 512,
"trainable": 512
},
{
"layer_name": "conv_layers.4.conv1",
"layer_type": "Conv1d",
"parameters": 393216,
"trainable": 393216
},
{
"layer_name": "conv_layers.4.bn2",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.4.conv2",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.4.shortcut.0",
"layer_type": "Conv1d",
"parameters": 131072,
"trainable": 131072
},
{
"layer_name": "conv_layers.5.bn1",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.5.conv1",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.5.bn2",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.5.conv2",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.6.bn1",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.6.conv1",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.6.bn2",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.6.conv2",
"layer_type": "Conv1d",
"parameters": 786432,
"trainable": 786432
},
{
"layer_name": "conv_layers.9.bn1",
"layer_type": "BatchNorm1d",
"parameters": 1024,
"trainable": 1024
},
{
"layer_name": "conv_layers.9.conv1",
"layer_type": "Conv1d",
"parameters": 1572864,
"trainable": 1572864
},
{
"layer_name": "conv_layers.9.bn2",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.9.conv2",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.9.shortcut.0",
"layer_type": "Conv1d",
"parameters": 524288,
"trainable": 524288
},
{
"layer_name": "conv_layers.10.bn1",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.10.conv1",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.10.bn2",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.10.conv2",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.11.bn1",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.11.conv1",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.11.bn2",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.11.conv2",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.14.bn1",
"layer_type": "BatchNorm1d",
"parameters": 2048,
"trainable": 2048
},
{
"layer_name": "conv_layers.14.conv1",
"layer_type": "Conv1d",
"parameters": 4718592,
"trainable": 4718592
},
{
"layer_name": "conv_layers.14.bn2",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.14.conv2",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.14.shortcut.0",
"layer_type": "Conv1d",
"parameters": 1572864,
"trainable": 1572864
},
{
"layer_name": "conv_layers.15.bn1",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.15.conv1",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.15.bn2",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.15.conv2",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.16.bn1",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.16.conv1",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.16.bn2",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.16.conv2",
"layer_type": "Conv1d",
"parameters": 7077888,
"trainable": 7077888
},
{
"layer_name": "conv_layers.19.bn1",
"layer_type": "BatchNorm1d",
"parameters": 3072,
"trainable": 3072
},
{
"layer_name": "conv_layers.19.conv1",
"layer_type": "Conv1d",
"parameters": 9437184,
"trainable": 9437184
},
{
"layer_name": "conv_layers.19.bn2",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.19.conv2",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.19.shortcut.0",
"layer_type": "Conv1d",
"parameters": 3145728,
"trainable": 3145728
},
{
"layer_name": "conv_layers.20.bn1",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.20.conv1",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.20.bn2",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.20.conv2",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.21.bn1",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.21.conv1",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "conv_layers.21.bn2",
"layer_type": "BatchNorm1d",
"parameters": 4096,
"trainable": 4096
},
{
"layer_name": "conv_layers.21.conv2",
"layer_type": "Conv1d",
"parameters": 12582912,
"trainable": 12582912
},
{
"layer_name": "fc1",
"layer_type": "Linear",
"parameters": 4196352,
"trainable": 4196352
},
{
"layer_name": "fc_layers.3",
"layer_type": "Linear",
"parameters": 4196352,
"trainable": 4196352
},
{
"layer_name": "fc_layers.6",
"layer_type": "Linear",
"parameters": 3147264,
"trainable": 3147264
},
{
"layer_name": "fc_layers.9",
"layer_type": "Linear",
"parameters": 1573888,
"trainable": 1573888
},
{
"layer_name": "fc_layers.12",
"layer_type": "Linear",
"parameters": 787200,
"trainable": 787200
},
{
"layer_name": "price_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "price_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "price_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volume_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volume_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volume_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "trend_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "trend_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "trend_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volatility_attention.query",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volatility_attention.key",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "volatility_attention.value",
"layer_type": "Linear",
"parameters": 590592,
"trainable": 590592
},
{
"layer_name": "attention_fusion.0",
"layer_type": "Linear",
"parameters": 3146752,
"trainable": 3146752
},
{
"layer_name": "attention_fusion.3",
"layer_type": "Linear",
"parameters": 787200,
"trainable": 787200
},
{
"layer_name": "advantage_stream.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "advantage_stream.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "advantage_stream.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "advantage_stream.8",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "value_stream.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "value_stream.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "value_stream.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "value_stream.8",
"layer_type": "Linear",
"parameters": 129,
"trainable": 129
},
{
"layer_name": "extrema_head.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "extrema_head.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "extrema_head.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "extrema_head.8",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_immediate.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "price_pred_immediate.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_immediate.5",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_midterm.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "price_pred_midterm.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_midterm.5",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_longterm.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "price_pred_longterm.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_longterm.5",
"layer_type": "Linear",
"parameters": 387,
"trainable": 387
},
{
"layer_name": "price_pred_value.0",
"layer_type": "Linear",
"parameters": 393728,
"trainable": 393728
},
{
"layer_name": "price_pred_value.3",
"layer_type": "Linear",
"parameters": 131328,
"trainable": 131328
},
{
"layer_name": "price_pred_value.6",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "price_pred_value.8",
"layer_type": "Linear",
"parameters": 1032,
"trainable": 1032
},
{
"layer_name": "volatility_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "volatility_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "volatility_head.5",
"layer_type": "Linear",
"parameters": 645,
"trainable": 645
},
{
"layer_name": "support_resistance_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "support_resistance_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "support_resistance_head.5",
"layer_type": "Linear",
"parameters": 774,
"trainable": 774
},
{
"layer_name": "market_regime_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "market_regime_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "market_regime_head.5",
"layer_type": "Linear",
"parameters": 903,
"trainable": 903
},
{
"layer_name": "risk_head.0",
"layer_type": "Linear",
"parameters": 196864,
"trainable": 196864
},
{
"layer_name": "risk_head.3",
"layer_type": "Linear",
"parameters": 32896,
"trainable": 32896
},
{
"layer_name": "risk_head.5",
"layer_type": "Linear",
"parameters": 516,
"trainable": 516
}
]
}
]
},
"saved_models": [
{
"filename": "cnn_best.pt.pt",
"path": "models/cnn_best.pt.pt",
"size_mb": 33.12374496459961,
"estimated_parameters": 2894410,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes"
]
},
{
"filename": "cnn_BTC_USDT_20250329_021448.pt",
"path": "models/cnn_BTC_USDT_20250329_021448.pt",
"size_mb": 26.9183931350708,
"estimated_parameters": 2350794,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes"
]
},
{
"filename": "cnn_BTC_USDT_20250329_021800.pt",
"path": "models/cnn_BTC_USDT_20250329_021800.pt",
"size_mb": 26.9523286819458,
"estimated_parameters": 2350794,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes"
]
},
{
"filename": "cnn_BTC_USD_20250329_015217.pt",
"path": "models/cnn_BTC_USD_20250329_015217.pt",
"size_mb": 1.9763126373291016,
"estimated_parameters": 170889,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes"
]
},
{
"filename": "cnn_BTC_USD_20250329_020430.pt",
"path": "models/cnn_BTC_USD_20250329_020430.pt",
"size_mb": 32.90281295776367,
"estimated_parameters": 2873740,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes"
]
},
{
"filename": "cnn_BTC_USD_20250329_020711.pt",
"path": "models/cnn_BTC_USD_20250329_020711.pt",
"size_mb": 32.90281295776367,
"estimated_parameters": 2873740,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes"
]
},
{
"filename": "cnn_final_20250331_001817.pt.pt",
"path": "models/cnn_final_20250331_001817.pt.pt",
"size_mb": 46.44105339050293,
"estimated_parameters": 12168195,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes",
"confidence_threshold",
"max_consecutive_same_action",
"action_counts",
"last_actions",
"model_version",
"timestamp"
]
},
{
"filename": "trading_agent_best_net_pnl.pt",
"path": "models/trading_agent_best_net_pnl.pt",
"size_mb": 39.7817268371582,
"estimated_parameters": 10424842,
"checkpoint_keys": [
"policy_net",
"target_net",
"optimizer",
"epsilon"
]
},
{
"filename": "trading_agent_best_pnl.pt",
"path": "models/trading_agent_best_pnl.pt",
"size_mb": 110.63929557800293,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\nPlease file an issue with the following so that we can make `weights_only=True` compatible with your use case: WeightsUnpickler error: Unsupported operand 149\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "trading_agent_best_reward.pt",
"path": "models/trading_agent_best_reward.pt",
"size_mb": 110.63994789123535,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\nPlease file an issue with the following so that we can make `weights_only=True` compatible with your use case: WeightsUnpickler error: Unsupported operand 149\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "trading_agent_final.pt",
"path": "models/trading_agent_final.pt",
"size_mb": 110.63858222961426,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\nPlease file an issue with the following so that we can make `weights_only=True` compatible with your use case: WeightsUnpickler error: Unsupported operand 149\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "trading_agent_live_trained.pt",
"path": "models/trading_agent_live_trained.pt",
"size_mb": 17.43359375,
"estimated_parameters": "Error loading",
"error": "PytorchStreamReader failed reading zip archive: failed finding central directory"
},
{
"filename": "best_rl_model.pth_agent_state.pt",
"path": "NN/models/saved/best_rl_model.pth_agent_state.pt",
"size_mb": 11.303743362426758,
"estimated_parameters": 0,
"checkpoint_keys": [
"epsilon",
"update_count",
"losses",
"optimizer_state"
]
},
{
"filename": "best_rl_model.pth_policy.pt",
"path": "NN/models/saved/best_rl_model.pth_policy.pt",
"size_mb": 5.6540985107421875,
"estimated_parameters": 1479751,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"bn1.weight",
"bn1.bias",
"bn1.running_mean",
"bn1.running_var",
"bn1.num_batches_tracked",
"conv2.weight",
"conv2.bias",
"bn2.weight",
"bn2.bias",
"bn2.running_mean",
"bn2.running_var",
"bn2.num_batches_tracked",
"conv3.weight",
"conv3.bias",
"bn3.weight",
"bn3.bias",
"bn3.running_mean",
"bn3.running_var",
"bn3.num_batches_tracked",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias"
]
},
{
"filename": "best_rl_model.pth_target.pt",
"path": "NN/models/saved/best_rl_model.pth_target.pt",
"size_mb": 5.6540985107421875,
"estimated_parameters": 1479751,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"bn1.weight",
"bn1.bias",
"bn1.running_mean",
"bn1.running_var",
"bn1.num_batches_tracked",
"conv2.weight",
"conv2.bias",
"bn2.weight",
"bn2.bias",
"bn2.running_mean",
"bn2.running_var",
"bn2.num_batches_tracked",
"conv3.weight",
"conv3.bias",
"bn3.weight",
"bn3.bias",
"bn3.running_mean",
"bn3.running_var",
"bn3.num_batches_tracked",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias"
]
},
{
"filename": "cnn_model_best.pt",
"path": "NN/models/saved/cnn_model_best.pt",
"size_mb": 0.0011577606201171875,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\nPlease file an issue with the following so that we can make `weights_only=True` compatible with your use case: WeightsUnpickler error: Unsupported operand 80\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "dqn_agent_agent_state.pt",
"path": "NN/models/saved/dqn_agent_agent_state.pt",
"size_mb": 0.10158538818359375,
"estimated_parameters": 0,
"checkpoint_keys": [
"epsilon",
"update_count",
"losses",
"optimizer_state",
"best_reward",
"avg_reward"
]
},
{
"filename": "dqn_agent_best_agent_state.pt",
"path": "NN/models/saved/dqn_agent_best_agent_state.pt",
"size_mb": 0.001384735107421875,
"estimated_parameters": 0,
"checkpoint_keys": [
"epsilon",
"update_count",
"losses",
"optimizer_state",
"best_reward",
"avg_reward"
]
},
{
"filename": "dqn_agent_best_policy.pt",
"path": "NN/models/saved/dqn_agent_best_policy.pt",
"size_mb": 1.1685981750488281,
"estimated_parameters": 304660,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "dqn_agent_best_target.pt",
"path": "NN/models/saved/dqn_agent_best_target.pt",
"size_mb": 1.1685981750488281,
"estimated_parameters": 304660,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "dqn_agent_episode_100_agent_state.pt",
"path": "NN/models/saved/dqn_agent_episode_100_agent_state.pt",
"size_mb": 0.00135040283203125,
"estimated_parameters": 0,
"checkpoint_keys": [
"epsilon",
"update_count",
"losses",
"optimizer_state"
]
},
{
"filename": "dqn_agent_episode_100_policy.pt",
"path": "NN/models/saved/dqn_agent_episode_100_policy.pt",
"size_mb": 11.874269485473633,
"estimated_parameters": 3109003,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"bn1.weight",
"bn1.bias",
"bn1.running_mean",
"bn1.running_var",
"bn1.num_batches_tracked",
"conv2.weight",
"conv2.bias",
"bn2.weight",
"bn2.bias",
"bn2.running_mean",
"bn2.running_var",
"bn2.num_batches_tracked",
"conv3.weight",
"conv3.bias",
"bn3.weight",
"bn3.bias",
"bn3.running_mean",
"bn3.running_var",
"bn3.num_batches_tracked",
"attention.query.weight",
"attention.query.bias",
"attention.key.weight",
"attention.key.bias",
"attention.value.weight",
"attention.value.bias",
"extrema_conv.weight",
"extrema_conv.bias",
"extrema_bn.weight",
"extrema_bn.bias",
"extrema_bn.running_mean",
"extrema_bn.running_var",
"extrema_bn.num_batches_tracked",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias",
"extrema_fc.weight",
"extrema_fc.bias"
]
},
{
"filename": "dqn_agent_episode_100_target.pt",
"path": "NN/models/saved/dqn_agent_episode_100_target.pt",
"size_mb": 11.874269485473633,
"estimated_parameters": 3109003,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"bn1.weight",
"bn1.bias",
"bn1.running_mean",
"bn1.running_var",
"bn1.num_batches_tracked",
"conv2.weight",
"conv2.bias",
"bn2.weight",
"bn2.bias",
"bn2.running_mean",
"bn2.running_var",
"bn2.num_batches_tracked",
"conv3.weight",
"conv3.bias",
"bn3.weight",
"bn3.bias",
"bn3.running_mean",
"bn3.running_var",
"bn3.num_batches_tracked",
"attention.query.weight",
"attention.query.bias",
"attention.key.weight",
"attention.key.bias",
"attention.value.weight",
"attention.value.bias",
"extrema_conv.weight",
"extrema_conv.bias",
"extrema_bn.weight",
"extrema_bn.bias",
"extrema_bn.running_mean",
"extrema_bn.running_var",
"extrema_bn.num_batches_tracked",
"fc1.weight",
"fc1.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias",
"extrema_fc.weight",
"extrema_fc.bias"
]
},
{
"filename": "dqn_agent_final_agent_state.pt",
"path": "NN/models/saved/dqn_agent_final_agent_state.pt",
"size_mb": 0.0176239013671875,
"estimated_parameters": 0,
"checkpoint_keys": [
"epsilon",
"update_count",
"losses",
"optimizer_state",
"best_reward",
"avg_reward"
]
},
{
"filename": "dqn_agent_final_policy.pt",
"path": "NN/models/saved/dqn_agent_final_policy.pt",
"size_mb": 4.747499465942383,
"estimated_parameters": 1242644,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "dqn_agent_final_target.pt",
"path": "NN/models/saved/dqn_agent_final_target.pt",
"size_mb": 4.747499465942383,
"estimated_parameters": 1242644,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "dqn_agent_policy.pt",
"path": "NN/models/saved/dqn_agent_policy.pt",
"size_mb": 4.74730110168457,
"estimated_parameters": 1242644,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "dqn_agent_target.pt",
"path": "NN/models/saved/dqn_agent_target.pt",
"size_mb": 4.74730110168457,
"estimated_parameters": 1242644,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "enhanced_dqn_best_agent_state.pt",
"path": "NN/models/saved/enhanced_dqn_best_agent_state.pt",
"size_mb": 0.00756072998046875,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "enhanced_dqn_best_policy.pt",
"path": "NN/models/saved/enhanced_dqn_best_policy.pt",
"size_mb": 3.562204360961914,
"estimated_parameters": 1085588,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim",
"confidence_threshold"
]
},
{
"filename": "enhanced_dqn_best_target.pt",
"path": "NN/models/saved/enhanced_dqn_best_target.pt",
"size_mb": 3.562204360961914,
"estimated_parameters": 1085588,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim",
"confidence_threshold"
]
},
{
"filename": "enhanced_dqn_final_agent_state.pt",
"path": "NN/models/saved/enhanced_dqn_final_agent_state.pt",
"size_mb": 0.007564544677734375,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "enhanced_dqn_final_policy.pt",
"path": "NN/models/saved/enhanced_dqn_final_policy.pt",
"size_mb": 3.562246322631836,
"estimated_parameters": 1085588,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim",
"confidence_threshold"
]
},
{
"filename": "enhanced_dqn_final_target.pt",
"path": "NN/models/saved/enhanced_dqn_final_target.pt",
"size_mb": 3.562246322631836,
"estimated_parameters": 1085588,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim",
"confidence_threshold"
]
},
{
"filename": "improved_dqn_agent_best_agent_state.pt",
"path": "NN/models/saved/improved_dqn_agent_best_agent_state.pt",
"size_mb": 0.0016021728515625,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "improved_dqn_agent_best_policy.pt",
"path": "NN/models/saved/improved_dqn_agent_best_policy.pt",
"size_mb": 2.108156204223633,
"estimated_parameters": 546571,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"norm1.batch_norm.weight",
"norm1.batch_norm.bias",
"norm1.batch_norm.running_mean",
"norm1.batch_norm.running_var",
"norm1.batch_norm.num_batches_tracked",
"norm1.group_norm.weight",
"norm1.group_norm.bias",
"norm1.layer_norm.weight",
"norm1.layer_norm.bias",
"norm1.layer_norm_1d.weight",
"norm1.layer_norm_1d.bias",
"conv2.weight",
"conv2.bias",
"norm2.batch_norm.weight",
"norm2.batch_norm.bias",
"norm2.batch_norm.running_mean",
"norm2.batch_norm.running_var",
"norm2.batch_norm.num_batches_tracked",
"norm2.group_norm.weight",
"norm2.group_norm.bias",
"norm2.layer_norm.weight",
"norm2.layer_norm.bias",
"norm2.layer_norm_1d.weight",
"norm2.layer_norm_1d.bias",
"conv3.weight",
"conv3.bias",
"norm3.batch_norm.weight",
"norm3.batch_norm.bias",
"norm3.batch_norm.running_mean",
"norm3.batch_norm.running_var",
"norm3.batch_norm.num_batches_tracked",
"norm3.group_norm.weight",
"norm3.group_norm.bias",
"norm3.layer_norm.weight",
"norm3.layer_norm.bias",
"norm3.layer_norm_1d.weight",
"norm3.layer_norm_1d.bias",
"attention.query.weight",
"attention.query.bias",
"attention.key.weight",
"attention.key.bias",
"attention.value.weight",
"attention.value.bias",
"extrema_conv.weight",
"extrema_conv.bias",
"extrema_norm.batch_norm.weight",
"extrema_norm.batch_norm.bias",
"extrema_norm.batch_norm.running_mean",
"extrema_norm.batch_norm.running_var",
"extrema_norm.batch_norm.num_batches_tracked",
"extrema_norm.group_norm.weight",
"extrema_norm.group_norm.bias",
"extrema_norm.layer_norm.weight",
"extrema_norm.layer_norm.bias",
"extrema_norm.layer_norm_1d.weight",
"extrema_norm.layer_norm_1d.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias",
"extrema_fc.weight",
"extrema_fc.bias",
"fc1.weight",
"fc1.bias"
]
},
{
"filename": "improved_dqn_agent_best_target.pt",
"path": "NN/models/saved/improved_dqn_agent_best_target.pt",
"size_mb": 2.108156204223633,
"estimated_parameters": 546571,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"norm1.batch_norm.weight",
"norm1.batch_norm.bias",
"norm1.batch_norm.running_mean",
"norm1.batch_norm.running_var",
"norm1.batch_norm.num_batches_tracked",
"norm1.group_norm.weight",
"norm1.group_norm.bias",
"norm1.layer_norm.weight",
"norm1.layer_norm.bias",
"norm1.layer_norm_1d.weight",
"norm1.layer_norm_1d.bias",
"conv2.weight",
"conv2.bias",
"norm2.batch_norm.weight",
"norm2.batch_norm.bias",
"norm2.batch_norm.running_mean",
"norm2.batch_norm.running_var",
"norm2.batch_norm.num_batches_tracked",
"norm2.group_norm.weight",
"norm2.group_norm.bias",
"norm2.layer_norm.weight",
"norm2.layer_norm.bias",
"norm2.layer_norm_1d.weight",
"norm2.layer_norm_1d.bias",
"conv3.weight",
"conv3.bias",
"norm3.batch_norm.weight",
"norm3.batch_norm.bias",
"norm3.batch_norm.running_mean",
"norm3.batch_norm.running_var",
"norm3.batch_norm.num_batches_tracked",
"norm3.group_norm.weight",
"norm3.group_norm.bias",
"norm3.layer_norm.weight",
"norm3.layer_norm.bias",
"norm3.layer_norm_1d.weight",
"norm3.layer_norm_1d.bias",
"attention.query.weight",
"attention.query.bias",
"attention.key.weight",
"attention.key.bias",
"attention.value.weight",
"attention.value.bias",
"extrema_conv.weight",
"extrema_conv.bias",
"extrema_norm.batch_norm.weight",
"extrema_norm.batch_norm.bias",
"extrema_norm.batch_norm.running_mean",
"extrema_norm.batch_norm.running_var",
"extrema_norm.batch_norm.num_batches_tracked",
"extrema_norm.group_norm.weight",
"extrema_norm.group_norm.bias",
"extrema_norm.layer_norm.weight",
"extrema_norm.layer_norm.bias",
"extrema_norm.layer_norm_1d.weight",
"extrema_norm.layer_norm_1d.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias",
"extrema_fc.weight",
"extrema_fc.bias",
"fc1.weight",
"fc1.bias"
]
},
{
"filename": "improved_dqn_agent_final_agent_state.pt",
"path": "NN/models/saved/improved_dqn_agent_final_agent_state.pt",
"size_mb": 0.001605987548828125,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "improved_dqn_agent_final_policy.pt",
"path": "NN/models/saved/improved_dqn_agent_final_policy.pt",
"size_mb": 2.108224868774414,
"estimated_parameters": 546571,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"norm1.batch_norm.weight",
"norm1.batch_norm.bias",
"norm1.batch_norm.running_mean",
"norm1.batch_norm.running_var",
"norm1.batch_norm.num_batches_tracked",
"norm1.group_norm.weight",
"norm1.group_norm.bias",
"norm1.layer_norm.weight",
"norm1.layer_norm.bias",
"norm1.layer_norm_1d.weight",
"norm1.layer_norm_1d.bias",
"conv2.weight",
"conv2.bias",
"norm2.batch_norm.weight",
"norm2.batch_norm.bias",
"norm2.batch_norm.running_mean",
"norm2.batch_norm.running_var",
"norm2.batch_norm.num_batches_tracked",
"norm2.group_norm.weight",
"norm2.group_norm.bias",
"norm2.layer_norm.weight",
"norm2.layer_norm.bias",
"norm2.layer_norm_1d.weight",
"norm2.layer_norm_1d.bias",
"conv3.weight",
"conv3.bias",
"norm3.batch_norm.weight",
"norm3.batch_norm.bias",
"norm3.batch_norm.running_mean",
"norm3.batch_norm.running_var",
"norm3.batch_norm.num_batches_tracked",
"norm3.group_norm.weight",
"norm3.group_norm.bias",
"norm3.layer_norm.weight",
"norm3.layer_norm.bias",
"norm3.layer_norm_1d.weight",
"norm3.layer_norm_1d.bias",
"attention.query.weight",
"attention.query.bias",
"attention.key.weight",
"attention.key.bias",
"attention.value.weight",
"attention.value.bias",
"extrema_conv.weight",
"extrema_conv.bias",
"extrema_norm.batch_norm.weight",
"extrema_norm.batch_norm.bias",
"extrema_norm.batch_norm.running_mean",
"extrema_norm.batch_norm.running_var",
"extrema_norm.batch_norm.num_batches_tracked",
"extrema_norm.group_norm.weight",
"extrema_norm.group_norm.bias",
"extrema_norm.layer_norm.weight",
"extrema_norm.layer_norm.bias",
"extrema_norm.layer_norm_1d.weight",
"extrema_norm.layer_norm_1d.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias",
"extrema_fc.weight",
"extrema_fc.bias",
"fc1.weight",
"fc1.bias"
]
},
{
"filename": "improved_dqn_agent_final_target.pt",
"path": "NN/models/saved/improved_dqn_agent_final_target.pt",
"size_mb": 2.108224868774414,
"estimated_parameters": 546571,
"checkpoint_keys": [
"conv1.weight",
"conv1.bias",
"norm1.batch_norm.weight",
"norm1.batch_norm.bias",
"norm1.batch_norm.running_mean",
"norm1.batch_norm.running_var",
"norm1.batch_norm.num_batches_tracked",
"norm1.group_norm.weight",
"norm1.group_norm.bias",
"norm1.layer_norm.weight",
"norm1.layer_norm.bias",
"norm1.layer_norm_1d.weight",
"norm1.layer_norm_1d.bias",
"conv2.weight",
"conv2.bias",
"norm2.batch_norm.weight",
"norm2.batch_norm.bias",
"norm2.batch_norm.running_mean",
"norm2.batch_norm.running_var",
"norm2.batch_norm.num_batches_tracked",
"norm2.group_norm.weight",
"norm2.group_norm.bias",
"norm2.layer_norm.weight",
"norm2.layer_norm.bias",
"norm2.layer_norm_1d.weight",
"norm2.layer_norm_1d.bias",
"conv3.weight",
"conv3.bias",
"norm3.batch_norm.weight",
"norm3.batch_norm.bias",
"norm3.batch_norm.running_mean",
"norm3.batch_norm.running_var",
"norm3.batch_norm.num_batches_tracked",
"norm3.group_norm.weight",
"norm3.group_norm.bias",
"norm3.layer_norm.weight",
"norm3.layer_norm.bias",
"norm3.layer_norm_1d.weight",
"norm3.layer_norm_1d.bias",
"attention.query.weight",
"attention.query.bias",
"attention.key.weight",
"attention.key.bias",
"attention.value.weight",
"attention.value.bias",
"extrema_conv.weight",
"extrema_conv.bias",
"extrema_norm.batch_norm.weight",
"extrema_norm.batch_norm.bias",
"extrema_norm.batch_norm.running_mean",
"extrema_norm.batch_norm.running_var",
"extrema_norm.batch_norm.num_batches_tracked",
"extrema_norm.group_norm.weight",
"extrema_norm.group_norm.bias",
"extrema_norm.layer_norm.weight",
"extrema_norm.layer_norm.bias",
"extrema_norm.layer_norm_1d.weight",
"extrema_norm.layer_norm_1d.bias",
"fc2.weight",
"fc2.bias",
"fc3.weight",
"fc3.bias",
"value_fc.weight",
"value_fc.bias",
"extrema_fc.weight",
"extrema_fc.bias",
"fc1.weight",
"fc1.bias"
]
},
{
"filename": "optimized_short_term_model.pt",
"path": "NN/models/saved/optimized_short_term_model.pt",
"size_mb": 1.1817035675048828,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "optimized_short_term_model_best.pt",
"path": "NN/models/saved/optimized_short_term_model_best.pt",
"size_mb": 4.372953414916992,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "optimized_short_term_model_final.pt",
"path": "NN/models/saved/optimized_short_term_model_final.pt",
"size_mb": 4.373065948486328,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "optimized_short_term_model_realtime_best.pt",
"path": "NN/models/saved/optimized_short_term_model_realtime_best.pt",
"size_mb": 6.557572364807129,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "optimized_short_term_model_realtime_final.pt",
"path": "NN/models/saved/optimized_short_term_model_realtime_final.pt",
"size_mb": 6.557641983032227,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "optimized_short_term_model_ticks_best.pt",
"path": "NN/models/saved/optimized_short_term_model_ticks_best.pt",
"size_mb": 0.13934326171875,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "optimized_short_term_model_ticks_final.pt",
"path": "NN/models/saved/optimized_short_term_model_ticks_final.pt",
"size_mb": 0.13964271545410156,
"estimated_parameters": "Error loading",
"error": "Weights only load failed. This file can still be loaded, to do so you have two options, \u001b[1mdo those steps only if you trust the source of the checkpoint\u001b[0m. \n\t(1) In PyTorch 2.6, we changed the default value of the `weights_only` argument in `torch.load` from `False` to `True`. Re-running `torch.load` with `weights_only` set to `False` will likely succeed, but it can result in arbitrary code execution. Do it only if you got the file from a trusted source.\n\t(2) Alternatively, to load with `weights_only=True` please check the recommended steps in the following error message.\n\tWeightsUnpickler error: Unsupported global: GLOBAL numpy._core.multiarray.scalar was not an allowed global by default. Please use `torch.serialization.add_safe_globals([scalar])` or the `torch.serialization.safe_globals([scalar])` context manager to allowlist this global if you trust this class/function.\n\nCheck the documentation of torch.load to learn more about types accepted by default with weights_only https://pytorch.org/docs/stable/generated/torch.load.html."
},
{
"filename": "rl_agent_best_agent_state.pt",
"path": "NN/models/saved/rl_agent_best_agent_state.pt",
"size_mb": 0.00925445556640625,
"estimated_parameters": 0,
"checkpoint_keys": [
"epsilon",
"update_count",
"losses",
"optimizer_state",
"best_reward",
"avg_reward"
]
},
{
"filename": "rl_agent_best_policy.pt",
"path": "NN/models/saved/rl_agent_best_policy.pt",
"size_mb": 7.395586013793945,
"estimated_parameters": 1936916,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "rl_agent_best_target.pt",
"path": "NN/models/saved/rl_agent_best_target.pt",
"size_mb": 7.395586013793945,
"estimated_parameters": 1936916,
"checkpoint_keys": [
"state_dict",
"input_shape",
"n_actions",
"feature_dim"
]
},
{
"filename": "supervised_model_best.pt",
"path": "NN/models/saved/supervised_model_best.pt",
"size_mb": 0.157318115234375,
"estimated_parameters": 12453,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes",
"confidence_threshold",
"max_consecutive_same_action",
"action_counts",
"last_actions",
"model_version",
"timestamp"
]
},
{
"filename": "supervised_model_best.pt.pt",
"path": "NN/models/saved/supervised_model_best.pt.pt",
"size_mb": 1.2264022827148438,
"estimated_parameters": 105670,
"checkpoint_keys": [
"model_state_dict",
"optimizer_state_dict",
"history",
"window_size",
"num_features",
"output_size",
"timeframes",
"confidence_threshold",
"max_consecutive_same_action",
"action_counts",
"last_actions",
"model_version",
"timestamp"
]
}
],
"summary": {
"total_model_architectures": 2,
"total_parameters_across_all": 504889098,
"total_size_mb": 1926.6676025390625,
"largest_model_parameters": 336592732,
"smallest_model_parameters": 168296366,
"saved_models_count": 52,
"saved_models_total_size_mb": 720.3670511245728
}
}