better pivots

commit 68b91f37bd (parent a8ea9b24c0)
Author: Dobromir Popov
Date: 2025-10-21 11:45:57 +03:00
7 changed files with 1318 additions and 26 deletions


@@ -57,6 +57,14 @@ from .huobi_cob_websocket import get_huobi_cob_websocket
 from .cob_integration import COBIntegration
 from .report_data_crawler import ReportDataCrawler, ReportData

+# Import unified storage components (optional)
+try:
+    from .unified_data_provider_extension import UnifiedDataProviderExtension
+    UNIFIED_STORAGE_AVAILABLE = True
+except ImportError:
+    UNIFIED_STORAGE_AVAILABLE = False
+
 logger = logging.getLogger(__name__)

+# Warn only after the module logger exists (logging inside the except block
+# above would raise NameError, since logger is not yet defined at that point)
+if not UNIFIED_STORAGE_AVAILABLE:
+    logger.warning("Unified storage components not available")
+
 @dataclass
@dataclass
@@ -249,6 +257,10 @@ class DataProvider:
         self.last_pivot_calculation: Dict[str, datetime] = {}
         self.pivot_calculation_interval = timedelta(minutes=5)  # Recalculate every 5 minutes

+        # Unified storage system (optional, initialized on demand)
+        self.unified_storage: Optional['UnifiedDataProviderExtension'] = None
+        self._unified_storage_enabled = False
+
         # Auto-fix corrupted cache files on startup
         self._auto_fix_corrupted_cache()
@@ -331,6 +343,163 @@ class DataProvider:
         # Start COB WebSocket integration
         self.start_cob_websocket_integration()

+    # ===================================================================
+    # UNIFIED STORAGE SYSTEM METHODS
+    # ===================================================================
+
+    async def enable_unified_storage(self):
+        """
+        Enable unified storage system with TimescaleDB backend.
+        Provides a single endpoint for real-time and historical data access.
+
+        Returns:
+            bool: True if successful, False otherwise
+        """
+        if not UNIFIED_STORAGE_AVAILABLE:
+            logger.error("Unified storage components not available. Install required dependencies.")
+            return False
+
+        if self._unified_storage_enabled:
+            logger.info("Unified storage already enabled")
+            return True
+
+        try:
+            logger.info("Enabling unified storage system...")
+
+            # Create unified storage extension
+            self.unified_storage = UnifiedDataProviderExtension(self)
+
+            # Initialize unified storage
+            success = await self.unified_storage.initialize_unified_storage()
+
+            if success:
+                self._unified_storage_enabled = True
+                logger.info("✅ Unified storage system enabled successfully")
+                return True
+            else:
+                logger.error("Failed to enable unified storage system")
+                return False
+
+        except Exception as e:
+            logger.error(f"Error enabling unified storage: {e}")
+            return False
+
+    async def disable_unified_storage(self):
+        """Disable unified storage system."""
+        if not self._unified_storage_enabled:
+            return
+
+        try:
+            if self.unified_storage:
+                await self.unified_storage.shutdown_unified_storage()
+
+            self._unified_storage_enabled = False
+            logger.info("Unified storage system disabled")
+
+        except Exception as e:
+            logger.error(f"Error disabling unified storage: {e}")
+
+    async def get_inference_data_unified(
+        self,
+        symbol: str,
+        timestamp: Optional[datetime] = None,
+        context_window_minutes: int = 5
+    ):
+        """
+        Get complete inference data using the unified storage system.
+
+        This is the MAIN UNIFIED ENDPOINT for all data access.
+        - If timestamp is None: returns latest real-time data from cache
+        - If timestamp is provided: returns historical data from the database
+
+        Args:
+            symbol: Trading symbol (e.g., 'ETH/USDT')
+            timestamp: Target timestamp (None = latest real-time data)
+            context_window_minutes: Minutes of context data before/after timestamp
+
+        Returns:
+            InferenceDataFrame with complete market data
+        """
+        if not self._unified_storage_enabled:
+            logger.warning("Unified storage not enabled. Call enable_unified_storage() first.")
+            # Auto-enable if possible
+            await self.enable_unified_storage()
+
+        if self.unified_storage:
+            return await self.unified_storage.get_inference_data(
+                symbol, timestamp, context_window_minutes
+            )
+        else:
+            logger.error("Unified storage not available")
+            return None
+
+    async def get_multi_timeframe_data_unified(
+        self,
+        symbol: str,
+        timeframes: List[str],
+        timestamp: Optional[datetime] = None,
+        limit: int = 100
+    ) -> Dict[str, pd.DataFrame]:
+        """
+        Get aligned multi-timeframe data using unified storage.
+
+        Args:
+            symbol: Trading symbol
+            timeframes: List of timeframes
+            timestamp: Target timestamp (None = latest)
+            limit: Number of candles per timeframe
+
+        Returns:
+            Dictionary mapping timeframe to DataFrame
+        """
+        if not self._unified_storage_enabled:
+            await self.enable_unified_storage()

+        if self.unified_storage:
+            return await self.unified_storage.get_multi_timeframe_data(
+                symbol, timeframes, timestamp, limit
+            )
+        else:
+            return {}
+
+    async def get_order_book_data_unified(
+        self,
+        symbol: str,
+        timestamp: Optional[datetime] = None
+    ):
+        """
+        Get order book data with imbalances using unified storage.
+
+        Args:
+            symbol: Trading symbol
+            timestamp: Target timestamp (None = latest)
+
+        Returns:
+            OrderBookDataFrame with bids, asks, imbalances
+        """
+        if not self._unified_storage_enabled:
+            await self.enable_unified_storage()
+
+        if self.unified_storage:
+            return await self.unified_storage.get_order_book_data(symbol, timestamp)
+        else:
+            return None
+
+    def get_unified_storage_stats(self) -> Dict[str, Any]:
+        """Get statistics from the unified storage system."""
+        if self.unified_storage:
+            return self.unified_storage.get_unified_stats()
+        else:
+            return {'enabled': False, 'error': 'Unified storage not initialized'}
+
+    def is_unified_storage_enabled(self) -> bool:
+        """Check if unified storage is enabled."""
+        return self._unified_storage_enabled
+
+    # ===================================================================
+    # END UNIFIED STORAGE SYSTEM METHODS
+    # ===================================================================
     def start_automatic_data_maintenance(self):
         """Start automatic data maintenance system"""
         if self.data_maintenance_active:
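A minimal usage sketch for the new unified-storage endpoints. This is hypothetical caller code, not part of the commit; the import path and the bare DataProvider() constructor are assumptions, and a reachable TimescaleDB backend is required for enable_unified_storage() to succeed.

    import asyncio
    from datetime import datetime, timezone
    from core.data_provider import DataProvider  # module path assumed

    async def main():
        provider = DataProvider()

        # Enable the unified storage layer; returns False if the optional
        # dependencies or the TimescaleDB backend are unavailable
        if not await provider.enable_unified_storage():
            return

        # Latest real-time snapshot: timestamp=None is served from the cache
        latest = await provider.get_inference_data_unified('ETH/USDT')

        # Historical snapshot: an explicit timestamp is served from the database
        past = await provider.get_inference_data_unified(
            'ETH/USDT',
            timestamp=datetime(2025, 10, 20, 12, 0, tzinfo=timezone.utc),
            context_window_minutes=5,
        )

        print(provider.get_unified_storage_stats())
        await provider.disable_unified_storage()

    asyncio.run(main())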
@@ -1853,8 +2022,8 @@ class DataProvider:
             # Convert DataFrame to numpy array format expected by Williams Market Structure
             ohlcv_array = monthly_data[['timestamp', 'open', 'high', 'low', 'close', 'volume']].copy()

-            # Convert timestamp to numeric for Williams analysis
-            ohlcv_array['timestamp'] = ohlcv_array['timestamp'].astype(np.int64) // 10**9  # Convert to seconds
+            # Convert timestamp to numeric for Williams analysis (ms)
+            ohlcv_array['timestamp'] = ohlcv_array['timestamp'].astype(np.int64) // 10**6
             ohlcv_array = ohlcv_array.to_numpy()

             # Initialize Williams Market Structure analyzer
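For reference, a standalone sketch of the timestamp conversion this hunk switches to: pandas datetime64[ns] values are cast to int64 nanoseconds and divided down to epoch milliseconds (// 10**6) instead of seconds, matching what the Williams Market Structure analyzer expects per the updated comment. The sample frame below is purely illustrative.

    import numpy as np
    import pandas as pd

    # Illustrative OHLCV frame; in the real code this comes from the provider's cache
    df = pd.DataFrame({
        'timestamp': pd.to_datetime(['2025-10-21 11:45:00', '2025-10-21 11:46:00']),
        'open': [1.0, 2.0], 'high': [1.5, 2.5], 'low': [0.5, 1.5],
        'close': [1.2, 2.2], 'volume': [10.0, 20.0],
    })

    # datetime64[ns] -> int64 nanoseconds -> milliseconds since epoch
    df['timestamp'] = df['timestamp'].astype(np.int64) // 10**6
    ohlcv_array = df[['timestamp', 'open', 'high', 'low', 'close', 'volume']].to_numpy()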
@@ -2248,7 +2417,7 @@ class DataProvider:
         """Get pivot bounds for a symbol"""
         return self.pivot_bounds.get(symbol)

-    def get_williams_pivot_levels(self, symbol: str) -> Dict[int, Any]:
+    def get_williams_pivot_levels(self, symbol: str, base_timeframe: str = '1m', limit: int = 2000) -> Dict[int, Any]:
         """Get Williams Market Structure pivot levels with full trend analysis

         Returns:
@@ -2262,16 +2431,18 @@ class DataProvider:
             logger.warning(f"Williams structure not initialized for {symbol}")
             return {}

-        # Calculate fresh pivot points from current cached data
-        df_1m = self.get_historical_data(symbol, '1m', limit=2000)
-        if df_1m is None or len(df_1m) < 100:
-            logger.warning(f"Insufficient 1m data for Williams pivot calculation: {symbol}")
+        # Calculate fresh pivot points from current cached data using desired base timeframe
+        tf = base_timeframe if base_timeframe in ['1s', '1m'] else '1m'
+        df = self.get_historical_data(symbol, tf, limit=limit)
+        if df is None or len(df) < 100:
+            logger.warning(f"Insufficient {tf} data for Williams pivot calculation: {symbol}")
             return {}

         # Convert DataFrame to numpy array
-        ohlcv_array = df_1m[['open', 'high', 'low', 'close', 'volume']].copy()
-        # Add timestamp as first column (convert to seconds)
-        timestamps = df_1m.index.astype(np.int64) // 10**9  # Convert to seconds
+        ohlcv_array = df[['open', 'high', 'low', 'close', 'volume']].copy()
+        # Add timestamp as first column (convert to milliseconds for WMS)
+        # pandas index is ns -> convert to ms
+        timestamps = df.index.astype(np.int64) // 10**6
         ohlcv_array.insert(0, 'timestamp', timestamps)
         ohlcv_array = ohlcv_array.to_numpy()
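A brief usage sketch of the widened get_williams_pivot_levels signature, reusing the provider instance from the earlier sketch (hypothetical call site; the structure of the returned per-level values is assumed, and '1s' only helps if second-level candles are cached):

    # Default behaviour is unchanged: 1m base timeframe, 2000 candles
    levels_1m = provider.get_williams_pivot_levels('ETH/USDT')

    # Higher-resolution pivots from 1s candles; unsupported values fall back to '1m'
    levels_1s = provider.get_williams_pivot_levels('ETH/USDT', base_timeframe='1s', limit=2000)

    for level, analysis in levels_1s.items():
        print(level, analysis)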