improve training and model data
@@ -23,7 +23,7 @@ class RewardCalculator:
        self.trade_timestamps = []
        self.frequency_threshold = 10  # Trades per minute threshold for penalty
        self.max_frequency_penalty = 0.05

    def record_pnl(self, pnl):
        """Record P&L for risk adjustment calculations"""
        self.trade_pnls.append(pnl)
@@ -36,7 +36,7 @@ class RewardCalculator:
        self.trade_timestamps.append(time())
        if len(self.trade_timestamps) > 100:
            self.trade_timestamps.pop(0)

    def _calculate_frequency_penalty(self):
        """Calculate penalty for high-frequency trading"""
        if len(self.trade_timestamps) < 2:
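Side note on the rolling window above: list.pop(0) is O(n). A collections.deque with maxlen=100 keeps the same fixed-size window with O(1) appends. A minimal standalone sketch (not the repository's code; the helper name is illustrative):

from collections import deque
from time import time

# Same behavior as the capped list above: once 100 timestamps are stored,
# each append silently evicts the oldest entry, so no explicit pop(0) is needed.
trade_timestamps = deque(maxlen=100)

def record_trade_timestamp():
    # hypothetical helper for illustration only
    trade_timestamps.append(time())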
@@ -47,9 +47,9 @@ class RewardCalculator:
        trades_per_minute = (len(self.trade_timestamps) / time_span) * 60
        if trades_per_minute > self.frequency_threshold:
            penalty = min(self.max_frequency_penalty, (trades_per_minute - self.frequency_threshold) * 0.001)
            return penalty
        return 0.0

    def _calculate_risk_adjustment(self, reward):
        """Adjust rewards based on risk (simple Sharpe ratio implementation)"""
        if len(self.trade_pnls) < 5:
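For reference, the penalty curve above is linear in the excess trading rate: 0.001 per trade/minute over the threshold, capped at max_frequency_penalty. A self-contained sketch of just that formula (constants copied from the class; the function name is illustrative, not from the diff):

FREQUENCY_THRESHOLD = 10    # trades per minute, as in the class above
MAX_FREQUENCY_PENALTY = 0.05

def frequency_penalty(trades_per_minute):
    """Linear penalty of 0.001 per excess trade/minute, capped at 0.05."""
    if trades_per_minute > FREQUENCY_THRESHOLD:
        return min(MAX_FREQUENCY_PENALTY,
                   (trades_per_minute - FREQUENCY_THRESHOLD) * 0.001)
    return 0.0

print(frequency_penalty(15))  # (15 - 10) * 0.001 = 0.005
print(frequency_penalty(70))  # 0.06 uncapped, clipped to 0.05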
@@ -62,7 +62,7 @@ class RewardCalculator:
        sharpe = mean_return / std_return
        adjustment_factor = np.clip(1.0 + 0.5 * sharpe, 0.5, 2.0)
        return reward * adjustment_factor

    def _calculate_holding_reward(self, position_held_time, price_change):
        """Calculate reward for holding a position"""
        base_holding_reward = 0.0005 * (position_held_time / 60.0)
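A self-contained sketch of the Sharpe-style adjustment in this hunk. The mean_return/std_return lines fall outside the diff context, so plain np.mean/np.std over the recorded P&Ls are assumed here:

import numpy as np

def risk_adjust(reward, trade_pnls):
    """Scale reward by a factor in [0.5, 2.0] from a naive Sharpe ratio."""
    if len(trade_pnls) < 5:
        return reward              # too few samples to adjust, as in the class
    mean_return = np.mean(trade_pnls)   # assumption: simple mean/std,
    std_return = np.std(trade_pnls)     # not visible in the diff context
    if std_return == 0:
        return reward              # guard against division by zero
    sharpe = mean_return / std_return
    adjustment_factor = np.clip(1.0 + 0.5 * sharpe, 0.5, 2.0)
    return reward * adjustment_factor

# Consistently positive P&Ls push the factor toward the 2.0 cap:
print(risk_adjust(1.0, [0.5, 0.6, 0.4, 0.5, 0.55]))  # 2.0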
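The base holding term in the last hunk scales linearly with holding time: 0.0005 reward units per minute held. How price_change modulates it is truncated by the diff context, so only the visible term is sketched here:

def base_holding_reward(position_held_time):
    # position_held_time in seconds, as implied by the /60.0 conversion
    return 0.0005 * (position_held_time / 60.0)

print(base_holding_reward(600))  # 10 minutes held -> 0.005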