fix T training memory usage (due for more improvement)
@@ -836,22 +836,10 @@ class TradingOrchestrator:
         try:
             from NN.models.dqn_agent import DQNAgent
 
-            # Determine actual state size from BaseDataInput
-            try:
-                base_data = self.data_provider.build_base_data_input(self.symbol)
-                if base_data:
-                    actual_state_size = len(base_data.get_feature_vector())
-                    logger.info(f"Detected actual state size: {actual_state_size}")
-                else:
-                    actual_state_size = 7850  # Fallback based on error message
-                    logger.warning(
-                        f"Could not determine state size, using fallback: {actual_state_size}"
-                    )
-            except Exception as e:
-                actual_state_size = 7850  # Fallback based on error message
-                logger.warning(
-                    f"Error determining state size: {e}, using fallback: {actual_state_size}"
-                )
+            # Use known state size instead of building data (which triggers massive API calls)
+            # The state size is determined by BaseDataInput structure and doesn't change
+            actual_state_size = 7850  # Known size from BaseDataInput.get_feature_vector()
+            logger.info(f"Using known state size: {actual_state_size}")
 
             action_size = self.config.rl.get("action_space", 3)
             self.rl_agent = DQNAgent(
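The change swaps the dynamic state-size probe, which rebuilt BaseDataInput and triggered heavy API calls on every agent initialization, for the hardcoded constant 7850. As the "(due for more improvement)" note in the commit title hints, a middle ground is to probe once and cache the result. Below is a minimal sketch of that idea; the resolve_state_size helper and its module-level cache are hypothetical additions, while build_base_data_input, get_feature_vector, and the 7850 fallback come from the diff itself.

# Hypothetical caching helper (not part of the commit): compute the DQN
# state size from BaseDataInput at most once per process, then reuse it,
# so agent construction neither hardcodes the value nor repeats API calls.
import logging

logger = logging.getLogger(__name__)

FALLBACK_STATE_SIZE = 7850  # known size of BaseDataInput.get_feature_vector()
_state_size_cache = None    # module-level memo; filled on first call


def resolve_state_size(data_provider, symbol):
    """Return the RL state size, probing the data provider at most once."""
    global _state_size_cache
    if _state_size_cache is not None:
        return _state_size_cache
    try:
        base_data = data_provider.build_base_data_input(symbol)
        if base_data:
            _state_size_cache = len(base_data.get_feature_vector())
            logger.info("Detected state size: %d", _state_size_cache)
        else:
            _state_size_cache = FALLBACK_STATE_SIZE
            logger.warning("No base data; using fallback: %d", FALLBACK_STATE_SIZE)
    except Exception as e:
        _state_size_cache = FALLBACK_STATE_SIZE
        logger.warning("Error determining state size: %s; using fallback: %d",
                       e, FALLBACK_STATE_SIZE)
    return _state_size_cache

With something like this in place, the orchestrator could call resolve_state_size(self.data_provider, self.symbol) before constructing DQNAgent, paying the probe cost once per process instead of on every initialization.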