diff --git a/Custom-Ocean.xyz-Dashboard/App.py b/Custom-Ocean.xyz-Dashboard/App.py
deleted file mode 100644
index 5a15949..0000000
--- a/Custom-Ocean.xyz-Dashboard/App.py
+++ /dev/null
@@ -1,1823 +0,0 @@
-from flask import Flask, render_template, jsonify, Response, request
-import requests, json, os, logging, re, time, sys, gc, psutil, signal, random
-from datetime import datetime, timedelta
-from zoneinfo import ZoneInfo
-from dataclasses import dataclass
-from concurrent.futures import ThreadPoolExecutor
-from bs4 import BeautifulSoup
-from flask_caching import Cache
-import redis
-from apscheduler.schedulers.background import BackgroundScheduler
-import threading
-
-app = Flask(__name__)
-
-# Set up caching using a simple in-memory cache.
-cache = Cache(app, config={'CACHE_TYPE': 'SimpleCache', 'CACHE_DEFAULT_TIMEOUT': 10})
-
-# Global variables for arrow history, legacy hashrate history, and a log of full metrics snapshots.
-arrow_history = {} # stored per second
-hashrate_history = []
-metrics_log = []
-
-# Limits for data collections to prevent memory growth
-MAX_HISTORY_ENTRIES = 180 # 3 hours worth at 1 min intervals
-MAX_SSE_CONNECTIONS = 10 # Maximum concurrent SSE connections
-MAX_SSE_CONNECTION_TIME = 900 # 15 minutes maximum SSE connection time (increased from 10 min)
-
-# Track active SSE connections
-active_sse_connections = 0
-sse_connections_lock = threading.Lock()
-
-# Global variable to hold the cached metrics updated by the background job.
-cached_metrics = None
-last_metrics_update_time = None
-
-# Global variables for worker data caching
-worker_data_cache = None
-last_worker_data_update = None
-WORKER_DATA_CACHE_TIMEOUT = 60 # Cache worker data for 60 seconds
-
-# Track scheduler health
-scheduler_last_successful_run = None
-scheduler_recreate_lock = threading.Lock()
-
-# Global lock for thread safety on shared state.
-state_lock = threading.Lock()
-
-# Configure logging.
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-
-# --- Global Server Start Time (Los Angeles Time) ---
-SERVER_START_TIME = datetime.now(ZoneInfo("America/Los_Angeles"))
-
-# --- Disable Client Caching for All Responses ---
-@app.after_request
-def add_header(response):
- response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
- response.headers["Pragma"] = "no-cache"
- response.headers["Expires"] = "0"
- return response
-
-# --- Memory usage monitoring ---
-def log_memory_usage():
- """Log current memory usage"""
- try:
- process = psutil.Process(os.getpid())
- mem_info = process.memory_info()
- logging.info(f"Memory usage: {mem_info.rss / 1024 / 1024:.2f} MB (RSS)")
-
- # Log the size of key data structures
- logging.info(f"Arrow history entries: {sum(len(v) for v in arrow_history.values() if isinstance(v, list))}")
- logging.info(f"Metrics log entries: {len(metrics_log)}")
- logging.info(f"Active SSE connections: {active_sse_connections}")
- except Exception as e:
- logging.error(f"Error logging memory usage: {e}")
-
-# --- Redis Connection for Shared State ---
-def get_redis_client():
- """Get a Redis client with connection retry logic."""
- REDIS_URL = os.environ.get("REDIS_URL")
-
- # Make Redis truly optional - if no URL provided, don't attempt connection
- if not REDIS_URL:
- logging.info("Redis URL not configured, using in-memory state only.")
- return None
-
- retry_count = 0
- max_retries = 3
-
- while retry_count < max_retries:
- try:
-            # Note: the 'compress' parameter is omitted because it is not supported by all redis-py versions
- client = redis.Redis.from_url(REDIS_URL)
- client.ping() # Test the connection
- logging.info(f"Connected to Redis at {REDIS_URL}")
- return client
- except Exception as e:
- retry_count += 1
- if retry_count < max_retries:
- logging.warning(f"Redis connection attempt {retry_count} failed: {e}. Retrying...")
- time.sleep(1) # Wait before retrying
- else:
- logging.error(f"Could not connect to Redis after {max_retries} attempts: {e}")
- return None
-
-# Get Redis client with retry logic
-redis_client = get_redis_client()
-STATE_KEY = "graph_state"
-
-# --- Load Graph State ---
-def load_graph_state():
- """Load graph state from Redis with support for the optimized format."""
- global arrow_history, hashrate_history, metrics_log
- if redis_client:
- try:
- # Check version to handle format changes
- version = redis_client.get(f"{STATE_KEY}_version")
- version = version.decode('utf-8') if version else "1.0"
-
- state_json = redis_client.get(STATE_KEY)
- if state_json:
- state = json.loads(state_json)
-
- # Handle different versions of the data format
- if version == "2.0": # Optimized format
- # Restore arrow_history
- compact_arrow_history = state.get("arrow_history", {})
- for key, values in compact_arrow_history.items():
- arrow_history[key] = [
- {"time": entry.get("t", ""),
- "value": entry.get("v", 0),
- "arrow": ""} # Default empty arrow
- for entry in values
- ]
-
- # Restore hashrate_history
- hashrate_history = state.get("hashrate_history", [])
-
- # Restore metrics_log
- compact_metrics_log = state.get("metrics_log", [])
- metrics_log = []
- for entry in compact_metrics_log:
- metrics_log.append({
- "timestamp": entry.get("ts", ""),
- "metrics": entry.get("m", {})
- })
- else: # Original format
- arrow_history = state.get("arrow_history", {})
- hashrate_history = state.get("hashrate_history", [])
- metrics_log = state.get("metrics_log", [])
-
- logging.info(f"Loaded graph state from Redis (format version {version}).")
- else:
- logging.info("No previous graph state found in Redis.")
- except Exception as e:
- logging.error(f"Error loading graph state from Redis: {e}")
- else:
- logging.info("Redis not available, using in-memory state.")
-
-# --- Save Graph State with Advanced Optimizations ---
-def save_graph_state():
- """Save graph state to Redis with optimized frequency, pruning, and data reduction."""
- if redis_client:
- # Check if we've saved recently to avoid too frequent saves
- # Only save at most once every 5 minutes
- current_time = time.time()
- if hasattr(save_graph_state, 'last_save_time') and \
- current_time - save_graph_state.last_save_time < 300: # 300 seconds = 5 minutes
- logging.debug("Skipping Redis save - last save was less than 5 minutes ago")
- return
-
- # Update the last save time
- save_graph_state.last_save_time = current_time
-
- # Prune data first to reduce volume
- prune_old_data()
-
- # Create compact versions of the data structures for Redis storage
- try:
- # 1. Create compact arrow_history with minimal data
- compact_arrow_history = {}
- for key, values in arrow_history.items():
- if isinstance(values, list) and values:
- # Only store recent history (last 2 hours)
- recent_values = values[-120:] if len(values) > 120 else values
- # Use shorter field names and remove unnecessary fields
- compact_arrow_history[key] = [
- {"t": entry["time"], "v": entry["value"]}
- for entry in recent_values
- ]
-
- # 2. Only keep essential hashrate_history
- compact_hashrate_history = hashrate_history[-60:] if len(hashrate_history) > 60 else hashrate_history
-
- # 3. Only keep recent metrics_log entries (last 30 minutes)
- # This is typically the largest data structure
- compact_metrics_log = []
- if metrics_log:
- # Keep only last 30 entries (30 minutes assuming 1-minute updates)
- recent_logs = metrics_log[-30:]
-
- for entry in recent_logs:
- # Only keep necessary fields from each metrics entry
- if "metrics" in entry and "timestamp" in entry:
- metrics_copy = {}
- original_metrics = entry["metrics"]
-
- # Only copy the most important metrics for historical tracking
- essential_keys = [
- "hashrate_60sec", "hashrate_24hr", "btc_price",
- "workers_hashing", "unpaid_earnings", "difficulty",
- "network_hashrate", "daily_profit_usd"
- ]
-
- for key in essential_keys:
- if key in original_metrics:
- metrics_copy[key] = original_metrics[key]
-
- # Skip arrow_history within metrics as we already stored it separately
- compact_metrics_log.append({
- "ts": entry["timestamp"],
- "m": metrics_copy
- })
-
- # Create the final state object
- state = {
- "arrow_history": compact_arrow_history,
- "hashrate_history": compact_hashrate_history,
- "metrics_log": compact_metrics_log
- }
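-            # Illustrative shape of the compact payload written to Redis (field values are examples only):
-            # {"arrow_history": {"hashrate_60sec": [{"t": "14:05:32", "v": 95.4}, ...]},
-            #  "hashrate_history": [...],
-            #  "metrics_log": [{"ts": "2025-01-01T14:05:32", "m": {"hashrate_60sec": 95.4, ...}}]}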
-
- # Convert to JSON once to reuse and measure size
- state_json = json.dumps(state)
- data_size_kb = len(state_json) / 1024
-
- # Log data size for monitoring
- logging.info(f"Saving graph state to Redis: {data_size_kb:.2f} KB (optimized format)")
-
- # Only save if data size is reasonable (adjust threshold as needed)
- if data_size_kb > 2000: # 2MB warning threshold (reduced from 5MB)
- logging.warning(f"Redis save data size is still large: {data_size_kb:.2f} KB")
-
- # Store version info to handle future format changes
- redis_client.set(f"{STATE_KEY}_version", "2.0")
- redis_client.set(STATE_KEY, state_json)
- logging.info(f"Successfully saved graph state to Redis ({data_size_kb:.2f} KB)")
- except Exception as e:
- logging.error(f"Error saving graph state to Redis: {e}")
- else:
- logging.info("Redis not available, skipping state save.")
-
-# Load persisted state on startup.
-load_graph_state()
-
-# --- Clean up old data ---
-def prune_old_data():
- """Remove old data to prevent memory growth with optimized strategy"""
- global arrow_history, metrics_log
-
- with state_lock:
- # Prune arrow_history with more sophisticated approach
- for key in arrow_history:
- if isinstance(arrow_history[key], list):
-                if len(arrow_history[key]) > MAX_HISTORY_ENTRIES:
-                    original_length = len(arrow_history[key])
-
-                    # For most recent data (last hour) - keep every point
-                    recent_data = arrow_history[key][-60:]
-
-                    # For older data, reduce resolution by keeping every other point
-                    older_data = arrow_history[key][:-60]
-                    if len(older_data) > 0:
-                        sparse_older_data = [older_data[i] for i in range(0, len(older_data), 2)]
-                        arrow_history[key] = sparse_older_data + recent_data
-                    else:
-                        arrow_history[key] = recent_data
-
-                    logging.info(f"Pruned {key} history from {original_length} to {len(arrow_history[key])} entries")
-
- # Prune metrics_log more aggressively
-        if len(metrics_log) > MAX_HISTORY_ENTRIES:
-            original_length = len(metrics_log)
-
-            # Keep most recent entries at full resolution
-            recent_logs = metrics_log[-60:]
-
-            # Reduce resolution of older entries
-            older_logs = metrics_log[:-60]
-            if len(older_logs) > 0:
-                sparse_older_logs = [older_logs[i] for i in range(0, len(older_logs), 3)] # Keep every 3rd entry
-                metrics_log = sparse_older_logs + recent_logs
-                logging.info(f"Pruned metrics log from {original_length} to {len(metrics_log)} entries")
-
- # Free memory more aggressively
- gc.collect()
-
- # Log memory usage after pruning
- log_memory_usage()
-
-# --- State persistence function ---
-def persist_critical_state():
- """Store critical state in Redis for recovery after worker restarts"""
- if redis_client:
- try:
- # Only persist if we have valid data
- if cached_metrics and cached_metrics.get("server_timestamp"):
- state = {
- "cached_metrics_timestamp": cached_metrics.get("server_timestamp"),
- "last_successful_run": scheduler_last_successful_run,
- "last_update_time": last_metrics_update_time
- }
- redis_client.set("critical_state", json.dumps(state))
- logging.info(f"Persisted critical state to Redis, timestamp: {cached_metrics.get('server_timestamp')}")
- except Exception as e:
- logging.error(f"Error persisting critical state: {e}")
-
-# --- Custom Template Filter ---
-@app.template_filter('commafy')
-def commafy(value):
- try:
- return "{:,}".format(int(value))
- except Exception:
- return value
-
-# --- Configuration Management ---
-CONFIG_FILE = "config.json"
-
-def load_config():
- default_config = {
- "power_cost": 0.0,
- "power_usage": 0.0,
- "wallet": "bc1py5zmrtssheq3shd8cptpl5l5m3txxr5afynyg2gyvam6w78s4dlqqnt4v9"
- }
- if os.path.exists(CONFIG_FILE):
- try:
- with open(CONFIG_FILE, "r") as f:
- config = json.load(f)
- return config
- except Exception as e:
- logging.error(f"Error loading config: {e}")
- return default_config
-
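-# Example config.json layout (illustrative values; the keys mirror default_config above):
-# {"power_cost": 0.10, "power_usage": 3450.0, "wallet": "bc1..."}
-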
-# --- Data Structures ---
-@dataclass
-class OceanData:
- pool_total_hashrate: float = None
- pool_total_hashrate_unit: str = None
- hashrate_24hr: float = None
- hashrate_24hr_unit: str = None
- hashrate_3hr: float = None
- hashrate_3hr_unit: str = None
- hashrate_10min: float = None
- hashrate_10min_unit: str = None
- hashrate_5min: float = None
- hashrate_5min_unit: str = None
- hashrate_60sec: float = None
- hashrate_60sec_unit: str = None
- estimated_earnings_per_day: float = None
- estimated_earnings_next_block: float = None
- estimated_rewards_in_window: float = None
- workers_hashing: int = None
- unpaid_earnings: float = None
- est_time_to_payout: str = None
- last_block: str = None
- last_block_height: str = None
- last_block_time: str = None
- blocks_found: str = None
- total_last_share: str = "N/A"
-    # BTC earned for the last block, stored in satoshis.
- last_block_earnings: str = None
-
-def convert_to_ths(value: float, unit: str) -> float:
- """Convert any hashrate unit to TH/s equivalent."""
- unit = unit.lower()
- if 'ph/s' in unit:
- return value * 1000 # 1 PH/s = 1000 TH/s
- elif 'eh/s' in unit:
- return value * 1000000 # 1 EH/s = 1,000,000 TH/s
- elif 'gh/s' in unit:
- return value / 1000 # 1 TH/s = 1000 GH/s
- elif 'mh/s' in unit:
- return value / 1000000 # 1 TH/s = 1,000,000 MH/s
- elif 'th/s' in unit:
- return value
- else:
- # Log unexpected unit
- logging.warning(f"Unexpected hashrate unit: {unit}, defaulting to treating as TH/s")
- return value
-
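-# Illustrative usage (not part of the original file): convert_to_ths(2.5, 'PH/s') returns 2500.0 and
-# convert_to_ths(800, 'GH/s') returns 0.8; an unrecognized unit is logged and treated as TH/s.
-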
-# --- Data Fetching Functions ---
-def get_ocean_data(session: requests.Session, wallet: str) -> OceanData:
- base_url = "https://ocean.xyz"
- stats_url = f"{base_url}/stats/{wallet}"
- headers = {
- 'User-Agent': 'Mozilla/5.0',
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
- 'Cache-Control': 'no-cache'
- }
-
- # Create an empty data object to populate
- data = OceanData()
-
- try:
- response = session.get(stats_url, headers=headers, timeout=10)
- if not response.ok:
- logging.error(f"Error fetching ocean data: status code {response.status_code}")
- return None
-
- soup = BeautifulSoup(response.text, 'html.parser')
-
- # Safely extract pool status information
- try:
- pool_status = soup.find("p", id="pool-status-item")
- if pool_status:
- text = pool_status.get_text(strip=True)
- m_total = re.search(r'HASHRATE:\s*([\d\.]+)\s*(\w+/s)', text, re.IGNORECASE)
- if m_total:
- raw_val = float(m_total.group(1))
- unit = m_total.group(2)
- data.pool_total_hashrate = raw_val
- data.pool_total_hashrate_unit = unit
- span = pool_status.find("span", class_="pool-status-newline")
- if span:
- last_block_text = span.get_text(strip=True)
- m_block = re.search(r'LAST BLOCK:\s*(\d+\s*\(.*\))', last_block_text, re.IGNORECASE)
- if m_block:
- full_last_block = m_block.group(1)
- data.last_block = full_last_block
- match = re.match(r'(\d+)\s*\((.*?)\)', full_last_block)
- if match:
- data.last_block_height = match.group(1)
- data.last_block_time = match.group(2)
- else:
- data.last_block_height = full_last_block
- data.last_block_time = ""
- except Exception as e:
- logging.error(f"Error parsing pool status: {e}")
-
- # Parse the earnings value from the earnings table and convert to sats.
- try:
- earnings_table = soup.find('tbody', id='earnings-tablerows')
- if earnings_table:
- latest_row = earnings_table.find('tr', class_='table-row')
- if latest_row:
- cells = latest_row.find_all('td', class_='table-cell')
- if len(cells) >= 3:
- earnings_text = cells[2].get_text(strip=True)
- earnings_value = earnings_text.replace('BTC', '').strip()
- try:
- btc_earnings = float(earnings_value)
- sats = int(round(btc_earnings * 100000000))
- data.last_block_earnings = str(sats)
- except Exception:
- data.last_block_earnings = earnings_value
- except Exception as e:
- logging.error(f"Error parsing earnings data: {e}")
-
- # Parse hashrate data from the hashrates table
- try:
- time_mapping = {
- '24 hrs': ('hashrate_24hr', 'hashrate_24hr_unit'),
- '3 hrs': ('hashrate_3hr', 'hashrate_3hr_unit'),
- '10 min': ('hashrate_10min', 'hashrate_10min_unit'),
- '5 min': ('hashrate_5min', 'hashrate_5min_unit'),
- '60 sec': ('hashrate_60sec', 'hashrate_60sec_unit')
- }
- hashrate_table = soup.find('tbody', id='hashrates-tablerows')
- if hashrate_table:
- for row in hashrate_table.find_all('tr', class_='table-row'):
- cells = row.find_all('td', class_='table-cell')
- if len(cells) >= 2:
- period_text = cells[0].get_text(strip=True).lower()
- hashrate_str = cells[1].get_text(strip=True).lower()
- try:
- parts = hashrate_str.split()
- hashrate_val = float(parts[0])
- unit = parts[1] if len(parts) > 1 else 'th/s'
- for key, (attr, unit_attr) in time_mapping.items():
- if key.lower() in period_text:
- setattr(data, attr, hashrate_val)
- setattr(data, unit_attr, unit)
- break
- except Exception as e:
- logging.error(f"Error parsing hashrate '{hashrate_str}': {e}")
- except Exception as e:
- logging.error(f"Error parsing hashrate table: {e}")
-
- # Parse lifetime stats data
- try:
- lifetime_snap = soup.find('div', id='lifetimesnap-statcards')
- if lifetime_snap:
- for container in lifetime_snap.find_all('div', class_='blocks dashboard-container'):
- label_div = container.find('div', class_='blocks-label')
- if label_div:
- label_text = label_div.get_text(strip=True).lower()
- earnings_span = label_div.find_next('span', class_=lambda x: x != 'tooltiptext')
- if earnings_span:
- span_text = earnings_span.get_text(strip=True)
- try:
- earnings_value = float(span_text.split()[0].replace(',', ''))
- if "earnings" in label_text and "day" in label_text:
- data.estimated_earnings_per_day = earnings_value
- except Exception:
- pass
- except Exception as e:
- logging.error(f"Error parsing lifetime stats: {e}")
-
- # Parse payout stats data
- try:
- payout_snap = soup.find('div', id='payoutsnap-statcards')
- if payout_snap:
- for container in payout_snap.find_all('div', class_='blocks dashboard-container'):
- label_div = container.find('div', class_='blocks-label')
- if label_div:
- label_text = label_div.get_text(strip=True).lower()
- earnings_span = label_div.find_next('span', class_=lambda x: x != 'tooltiptext')
- if earnings_span:
- span_text = earnings_span.get_text(strip=True)
- try:
- earnings_value = float(span_text.split()[0].replace(',', ''))
- if "earnings" in label_text and "block" in label_text:
- data.estimated_earnings_next_block = earnings_value
- elif "rewards" in label_text and "window" in label_text:
- data.estimated_rewards_in_window = earnings_value
- except Exception:
- pass
- except Exception as e:
- logging.error(f"Error parsing payout stats: {e}")
-
- # Parse user stats data
- try:
- usersnap = soup.find('div', id='usersnap-statcards')
- if usersnap:
- for container in usersnap.find_all('div', class_='blocks dashboard-container'):
- label_div = container.find('div', class_='blocks-label')
- if label_div:
- label_text = label_div.get_text(strip=True).lower()
- value_span = label_div.find_next('span', class_=lambda x: x != 'tooltiptext')
- if value_span:
- span_text = value_span.get_text(strip=True)
- if "workers currently hashing" in label_text:
- try:
- data.workers_hashing = int(span_text.replace(",", ""))
- except Exception:
- pass
- elif "unpaid earnings" in label_text and "btc" in span_text.lower():
- try:
- data.unpaid_earnings = float(span_text.split()[0].replace(',', ''))
- except Exception:
- pass
- elif "estimated time until minimum payout" in label_text:
- data.est_time_to_payout = span_text
- except Exception as e:
- logging.error(f"Error parsing user stats: {e}")
-
- # Parse blocks found data
- try:
- blocks_container = soup.find(lambda tag: tag.name == "div" and "blocks found" in tag.get_text(strip=True).lower())
- if blocks_container:
- span = blocks_container.find_next_sibling("span")
- if span:
- num_match = re.search(r'(\d+)', span.get_text(strip=True))
- if num_match:
- data.blocks_found = num_match.group(1)
- except Exception as e:
- logging.error(f"Error parsing blocks found: {e}")
-
- # Parse last share time data
- try:
- workers_table = soup.find("tbody", id="workers-tablerows")
- if workers_table:
- for row in workers_table.find_all("tr", class_="table-row"):
- cells = row.find_all("td")
- if cells and cells[0].get_text(strip=True).lower().startswith("total"):
- last_share_str = cells[2].get_text(strip=True)
- try:
- naive_dt = datetime.strptime(last_share_str, "%Y-%m-%d %H:%M")
- utc_dt = naive_dt.replace(tzinfo=ZoneInfo("UTC"))
- la_dt = utc_dt.astimezone(ZoneInfo("America/Los_Angeles"))
- data.total_last_share = la_dt.strftime("%Y-%m-%d %I:%M %p")
- except Exception as e:
- logging.error(f"Error converting last share time '{last_share_str}': {e}")
- data.total_last_share = last_share_str
- break
- except Exception as e:
- logging.error(f"Error parsing last share time: {e}")
-
- return data
- except Exception as e:
- logging.error(f"Error fetching Ocean data: {e}")
- return None
-
-def fetch_url(session: requests.Session, url: str, timeout: int = 5):
- try:
- return session.get(url, timeout=timeout)
- except Exception as e:
- logging.error(f"Error fetching {url}: {e}")
- return None
-
-def get_bitcoin_stats(session: requests.Session, cache_data: dict):
- """Fetch Bitcoin network statistics with improved error handling and caching."""
- urls = {
- "difficulty": "https://blockchain.info/q/getdifficulty",
- "hashrate": "https://blockchain.info/q/hashrate",
- "ticker": "https://blockchain.info/ticker",
- "blockcount": "https://blockchain.info/q/getblockcount"
- }
-
- # Use previous cached values as defaults if available
- difficulty = cache_data.get("difficulty")
- network_hashrate = cache_data.get("network_hashrate")
- btc_price = cache_data.get("btc_price")
- block_count = cache_data.get("block_count")
-
- try:
- with ThreadPoolExecutor(max_workers=4) as executor:
- futures = {key: executor.submit(fetch_url, session, url) for key, url in urls.items()}
- responses = {key: futures[key].result(timeout=5) for key in futures}
-
- # Process each response individually with error handling
- if responses["difficulty"] and responses["difficulty"].ok:
- try:
- difficulty = float(responses["difficulty"].text)
- cache_data["difficulty"] = difficulty
- except (ValueError, TypeError) as e:
- logging.error(f"Error parsing difficulty: {e}")
-
- if responses["hashrate"] and responses["hashrate"].ok:
- try:
- network_hashrate = float(responses["hashrate"].text) * 1e9
- cache_data["network_hashrate"] = network_hashrate
- except (ValueError, TypeError) as e:
- logging.error(f"Error parsing network hashrate: {e}")
-
- if responses["ticker"] and responses["ticker"].ok:
- try:
- ticker_data = responses["ticker"].json()
- btc_price = float(ticker_data.get("USD", {}).get("last", btc_price))
- cache_data["btc_price"] = btc_price
- except (ValueError, TypeError, json.JSONDecodeError) as e:
- logging.error(f"Error parsing BTC price: {e}")
-
- if responses["blockcount"] and responses["blockcount"].ok:
- try:
- block_count = int(responses["blockcount"].text)
- cache_data["block_count"] = block_count
- except (ValueError, TypeError) as e:
- logging.error(f"Error parsing block count: {e}")
-
- except Exception as e:
- logging.error(f"Error fetching Bitcoin stats: {e}")
-
- return difficulty, network_hashrate, btc_price, block_count
-
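-# Note: get_bitcoin_stats returns (difficulty, network_hashrate in H/s, btc_price in USD, block_count);
-# any value that fails to refresh keeps its previously cached value, or remains None on the first run.
-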
-# --- Dashboard Class ---
-class MiningDashboardWeb:
- def __init__(self, power_cost, power_usage, wallet):
- self.power_cost = power_cost
- self.power_usage = power_usage
- self.wallet = wallet
- self.cache = {}
- self.sats_per_btc = 100_000_000
- self.previous_values = {}
- self.session = requests.Session()
-
- def fetch_metrics(self):
- # Add execution time tracking
- start_time = time.time()
-
- try:
- with ThreadPoolExecutor(max_workers=2) as executor:
- future_ocean = executor.submit(get_ocean_data, self.session, self.wallet)
- future_btc = executor.submit(get_bitcoin_stats, self.session, self.cache)
- try:
- ocean_data = future_ocean.result(timeout=15)
- btc_stats = future_btc.result(timeout=15)
- except Exception as e:
- logging.error(f"Error fetching metrics concurrently: {e}")
- return None
-
- if ocean_data is None:
- logging.error("Failed to retrieve Ocean data")
- return None
-
- difficulty, network_hashrate, btc_price, block_count = btc_stats
-
- # If we failed to get network hashrate, use a reasonable default to prevent division by zero
- if network_hashrate is None:
- logging.warning("Using default network hashrate")
- network_hashrate = 500e18 # ~500 EH/s as a reasonable fallback
-
- # If we failed to get BTC price, use a reasonable default
- if btc_price is None:
- logging.warning("Using default BTC price")
- btc_price = 75000 # $75,000 as a reasonable fallback
-
- # Convert hashrates to a common unit (TH/s) for consistency
- hr3 = ocean_data.hashrate_3hr or 0
- hr3_unit = (ocean_data.hashrate_3hr_unit or 'th/s').lower()
- local_hashrate = convert_to_ths(hr3, hr3_unit) * 1e12 # Convert to H/s for calculation
-
- hash_proportion = local_hashrate / network_hashrate if network_hashrate else 0
- block_reward = 3.125
- blocks_per_day = 86400 / 600
- daily_btc_gross = hash_proportion * block_reward * blocks_per_day
- daily_btc_net = daily_btc_gross * (1 - 0.02 - 0.028)
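-                # Illustrative numbers (not from live data): 100 TH/s against a 500 EH/s network gives
-                # hash_proportion = 1e14 / 5e20 = 2e-7; at 3.125 BTC per block and 144 blocks/day that is
-                # ~9.0e-5 BTC/day gross, and the 2% + 2.8% fee haircut leaves ~8.6e-5 BTC/day net.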
-
- daily_revenue = round(daily_btc_net * btc_price, 2) if btc_price is not None else None
- daily_power_cost = round((self.power_usage / 1000) * self.power_cost * 24, 2)
- daily_profit_usd = round(daily_revenue - daily_power_cost, 2) if daily_revenue is not None else None
- monthly_profit_usd = round(daily_profit_usd * 30, 2) if daily_profit_usd is not None else None
-
- daily_mined_sats = int(round(daily_btc_net * self.sats_per_btc))
- monthly_mined_sats = daily_mined_sats * 30
-
- # Use default 0 for earnings if scraping returned None.
- estimated_earnings_per_day = ocean_data.estimated_earnings_per_day if ocean_data.estimated_earnings_per_day is not None else 0
- estimated_earnings_next_block = ocean_data.estimated_earnings_next_block if ocean_data.estimated_earnings_next_block is not None else 0
- estimated_rewards_in_window = ocean_data.estimated_rewards_in_window if ocean_data.estimated_rewards_in_window is not None else 0
-
- metrics = {
- 'pool_total_hashrate': ocean_data.pool_total_hashrate,
- 'pool_total_hashrate_unit': ocean_data.pool_total_hashrate_unit,
- 'hashrate_24hr': ocean_data.hashrate_24hr,
- 'hashrate_24hr_unit': ocean_data.hashrate_24hr_unit,
- 'hashrate_3hr': ocean_data.hashrate_3hr,
- 'hashrate_3hr_unit': ocean_data.hashrate_3hr_unit,
- 'hashrate_10min': ocean_data.hashrate_10min,
- 'hashrate_10min_unit': ocean_data.hashrate_10min_unit,
- 'hashrate_5min': ocean_data.hashrate_5min,
- 'hashrate_5min_unit': ocean_data.hashrate_5min_unit,
- 'hashrate_60sec': ocean_data.hashrate_60sec,
- 'hashrate_60sec_unit': ocean_data.hashrate_60sec_unit,
- 'workers_hashing': ocean_data.workers_hashing,
- 'btc_price': btc_price,
- 'block_number': block_count,
- 'network_hashrate': (network_hashrate / 1e18) if network_hashrate else None,
- 'difficulty': difficulty,
- 'daily_btc_net': daily_btc_net,
- 'estimated_earnings_per_day': estimated_earnings_per_day,
- 'daily_revenue': daily_revenue,
- 'daily_power_cost': daily_power_cost,
- 'daily_profit_usd': daily_profit_usd,
- 'monthly_profit_usd': monthly_profit_usd,
- 'daily_mined_sats': daily_mined_sats,
- 'monthly_mined_sats': monthly_mined_sats,
- 'estimated_earnings_next_block': estimated_earnings_next_block,
- 'estimated_rewards_in_window': estimated_rewards_in_window,
- 'unpaid_earnings': ocean_data.unpaid_earnings,
- 'est_time_to_payout': ocean_data.est_time_to_payout,
- 'last_block_height': ocean_data.last_block_height,
- 'last_block_time': ocean_data.last_block_time,
- 'total_last_share': ocean_data.total_last_share,
- 'blocks_found': ocean_data.blocks_found or "0",
- # Last block earnings (in sats)
- 'last_block_earnings': ocean_data.last_block_earnings
- }
- metrics['estimated_earnings_per_day_sats'] = int(round(estimated_earnings_per_day * self.sats_per_btc))
- metrics['estimated_earnings_next_block_sats'] = int(round(estimated_earnings_next_block * self.sats_per_btc))
- metrics['estimated_rewards_in_window_sats'] = int(round(estimated_rewards_in_window * self.sats_per_btc))
-
- # Ensure we have at least one data point for history at startup
- if not arrow_history.get('hashrate_60sec') and ocean_data.hashrate_60sec:
- logging.info("Initializing hashrate_60sec history with first data point")
-                current_second = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%H:%M:%S")
- if 'hashrate_60sec' not in arrow_history:
- arrow_history['hashrate_60sec'] = []
-
- # Add a starter point for the chart
- arrow_history['hashrate_60sec'].append({
- "time": current_second,
- "value": float(ocean_data.hashrate_60sec),
- "arrow": ""
- })
-                # Add a second point one second later so the chart has a line segment to render
-                next_second = (datetime.strptime(current_second, "%H:%M:%S") + timedelta(seconds=1)).strftime("%H:%M:%S")
-                arrow_history['hashrate_60sec'].append({
-                    "time": next_second,
-                    "value": float(ocean_data.hashrate_60sec),
-                    "arrow": ""
-                })
- logging.info(f"Added initial data points for chart: {ocean_data.hashrate_60sec} {ocean_data.hashrate_60sec_unit}")
-
- arrow_keys = [
- "pool_total_hashrate", "hashrate_24hr", "hashrate_3hr", "hashrate_10min",
- "hashrate_60sec", "block_number", "btc_price", "network_hashrate",
- "difficulty", "daily_revenue", "daily_power_cost", "daily_profit_usd",
- "monthly_profit_usd", "daily_mined_sats", "monthly_mined_sats", "unpaid_earnings",
- "estimated_earnings_per_day_sats", "estimated_earnings_next_block_sats", "estimated_rewards_in_window_sats",
- "workers_hashing"
- ]
-
- # --- Bucket by second (Los Angeles Time) with thread safety ---
- current_second = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%H:%M:%S")
- with state_lock:
- for key in arrow_keys:
- if metrics.get(key) is not None:
- current_val = metrics[key]
- arrow = ""
- if key in arrow_history and arrow_history[key]:
- previous_val = arrow_history[key][-1]["value"]
- if current_val > previous_val:
- arrow = "↑"
- elif current_val < previous_val:
- arrow = "↓"
- if key not in arrow_history:
- arrow_history[key] = []
- if not arrow_history[key] or arrow_history[key][-1]["time"] != current_second:
- arrow_history[key].append({
- "time": current_second,
- "value": current_val,
- "arrow": arrow
- })
- else:
- arrow_history[key][-1]["value"] = current_val
- arrow_history[key][-1]["arrow"] = arrow
- # Cap history to three hours worth (180 entries)
- if len(arrow_history[key]) > MAX_HISTORY_ENTRIES:
- arrow_history[key] = arrow_history[key][-MAX_HISTORY_ENTRIES:]
-
- # --- Aggregate arrow_history by minute for the graph ---
- aggregated_history = {}
- for key, entries in arrow_history.items():
- minute_groups = {}
- for entry in entries:
- minute = entry["time"][:5] # extract HH:MM
- minute_groups[minute] = entry # take last entry for that minute
- aggregated_history[key] = list(minute_groups.values())
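-            # Illustrative example (not from live data): per-second samples at 14:05:01, 14:05:17 and 14:05:59
-            # all share the "14:05" minute key, so only the 14:05:59 entry survives into aggregated_history
-            # and is what the dashboard graph plots for that minute.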
- metrics["arrow_history"] = aggregated_history
- metrics["history"] = hashrate_history
-
- global metrics_log
- entry = {"timestamp": datetime.now().isoformat(), "metrics": metrics}
- metrics_log.append(entry)
- # Cap the metrics log to three hours worth (180 entries)
- if len(metrics_log) > MAX_HISTORY_ENTRIES:
- metrics_log = metrics_log[-MAX_HISTORY_ENTRIES:]
-
- # --- Add server timestamps to the response in Los Angeles Time ---
- metrics["server_timestamp"] = datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
- metrics["server_start_time"] = SERVER_START_TIME.astimezone(ZoneInfo("America/Los_Angeles")).isoformat()
-
- # Log execution time
- execution_time = time.time() - start_time
- metrics["execution_time"] = execution_time
- if execution_time > 10:
- logging.warning(f"Metrics fetch took {execution_time:.2f} seconds")
- else:
- logging.info(f"Metrics fetch completed in {execution_time:.2f} seconds")
-
- return metrics
-
- except Exception as e:
- logging.error(f"Unexpected error in fetch_metrics: {e}")
- return None
-
-# --- Workers Dashboard Functions ---
-
-def generate_workers_data(num_workers, total_hashrate, hashrate_unit, total_unpaid_earnings=None):
- """Generate simulated worker data based on total hashrate, ensuring total matches exactly.
- Now also distributes unpaid earnings proportionally when provided."""
- # Worker model types for simulation
- models = [
- {"type": "ASIC", "model": "Bitmain Antminer S19 Pro", "max_hashrate": 110, "power": 3250},
- {"type": "ASIC", "model": "MicroBT Whatsminer M50S", "max_hashrate": 130, "power": 3276},
- {"type": "ASIC", "model": "Bitmain Antminer S19j Pro", "max_hashrate": 104, "power": 3150},
- {"type": "FPGA", "model": "BitAxe FPGA Miner", "max_hashrate": 3.2, "power": 35}
- ]
-
- # Worker names for simulation
- prefixes = ["Antminer", "Whatsminer", "Miner", "Rig", "Node", "Worker", "BitAxe", "BTC"]
-
- # Calculate hashrate distribution - majority of hashrate to online workers
- online_count = max(1, int(num_workers * 0.8)) # At least 1 online worker
- offline_count = num_workers - online_count
-
- # Average hashrate per online worker
- avg_hashrate = total_hashrate / online_count if online_count > 0 else 0
-
- workers = []
- current_time = datetime.now(ZoneInfo("America/Los_Angeles"))
-
- # Default total unpaid earnings if not provided
- if total_unpaid_earnings is None or total_unpaid_earnings <= 0:
- total_unpaid_earnings = 0.001 # Default small amount
-
- # Generate online workers
- for i in range(online_count):
-        # Select a model: ASIC models for most workers, FPGA for the last worker when the average hashrate is small
-        if i < online_count - 1 or avg_hashrate > 5:
-            model_idx = random.randint(0, len(models) - 2) # Exclude FPGA for most workers
-        else:
-            model_idx = len(models) - 1 # FPGA for last worker if small hashrate
-
-        model_info = models[model_idx]
-
- # Generate hashrate with some random variation
- base_hashrate = min(model_info["max_hashrate"], avg_hashrate * random.uniform(0.5, 1.5))
- hashrate_60sec = round(base_hashrate * random.uniform(0.9, 1.1), 2)
- hashrate_3hr = round(base_hashrate * random.uniform(0.85, 1.0), 2)
-
- # Generate last share time (within last 5 minutes)
- minutes_ago = random.randint(0, 5)
- last_share = (current_time - timedelta(minutes=minutes_ago)).strftime("%Y-%m-%d %H:%M")
-
- # Generate acceptance rate (95-100%)
- acceptance_rate = round(random.uniform(95, 100), 1)
-
- # Generate temperature (normal operating range)
- temperature = random.randint(55, 70) if model_info["type"] == "ASIC" else random.randint(45, 55)
-
- # Create a unique name
- if model_info["type"] == "FPGA":
- name = f"{prefixes[-1]}{random.randint(1, 99):02d}"
- else:
- name = f"{random.choice(prefixes[:-1])}{random.randint(1, 99):02d}"
-
- workers.append({
- "name": name,
- "status": "online",
- "type": model_info["type"],
- "model": model_info["model"],
- "hashrate_60sec": hashrate_60sec,
- "hashrate_60sec_unit": hashrate_unit,
- "hashrate_3hr": hashrate_3hr,
- "hashrate_3hr_unit": hashrate_unit,
- "efficiency": round(random.uniform(65, 95), 1),
- "last_share": last_share,
- "earnings": 0, # Will be set after all workers are generated
- "acceptance_rate": acceptance_rate,
- "power_consumption": model_info["power"],
- "temperature": temperature
- })
-
- # Generate offline workers
- for i in range(offline_count):
- # Select a model - more likely to be FPGA for offline
- if random.random() > 0.6:
- model_info = models[-1] # FPGA
- else:
- model_info = random.choice(models[:-1]) # ASIC
-
- # Generate last share time (0.5 to 8 hours ago)
- hours_ago = random.uniform(0.5, 8)
- last_share = (current_time - timedelta(hours=hours_ago)).strftime("%Y-%m-%d %H:%M")
-
- # Generate hashrate (historical before going offline)
- if model_info["type"] == "FPGA":
- hashrate_3hr = round(random.uniform(1, 3), 2)
- else:
- hashrate_3hr = round(random.uniform(20, 90), 2)
-
- # Create a unique name
- if model_info["type"] == "FPGA":
- name = f"{prefixes[-1]}{random.randint(1, 99):02d}"
- else:
- name = f"{random.choice(prefixes[:-1])}{random.randint(1, 99):02d}"
-
- workers.append({
- "name": name,
- "status": "offline",
- "type": model_info["type"],
- "model": model_info["model"],
- "hashrate_60sec": 0,
- "hashrate_60sec_unit": hashrate_unit,
- "hashrate_3hr": hashrate_3hr,
- "hashrate_3hr_unit": hashrate_unit,
- "efficiency": 0,
- "last_share": last_share,
- "earnings": 0, # Minimal earnings for offline workers
- "acceptance_rate": round(random.uniform(95, 99), 1),
- "power_consumption": 0,
- "temperature": 0
- })
-
-    # --- Hashrate alignment: scale online workers so their sum matches the dashboard total ---
- # Calculate the current sum of online worker hashrates
- current_total = sum(w["hashrate_3hr"] for w in workers if w["status"] == "online")
-
- # If we have online workers and the total doesn't match, apply a scaling factor
- if online_count > 0 and abs(current_total - total_hashrate) > 0.01:
- scaling_factor = total_hashrate / current_total if current_total > 0 else 1
-
- # Apply scaling to all online workers
- for worker in workers:
- if worker["status"] == "online":
- # Scale the 3hr hashrate to exactly match total
- worker["hashrate_3hr"] = round(worker["hashrate_3hr"] * scaling_factor, 2)
-
- # Scale the 60sec hashrate proportionally
- if worker["hashrate_60sec"] > 0:
- worker["hashrate_60sec"] = round(worker["hashrate_60sec"] * scaling_factor, 2)
-
-    # --- Distribute unpaid earnings proportionally to online hashrate ---
- # First calculate the total effective hashrate (only from online workers)
- total_effective_hashrate = sum(w["hashrate_3hr"] for w in workers if w["status"] == "online")
-
- # Reserve a small portion (5%) of earnings for offline workers
- online_earnings_pool = total_unpaid_earnings * 0.95
- offline_earnings_pool = total_unpaid_earnings * 0.05
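-    # Illustrative example (not from live data): with 0.001 BTC unpaid and two online workers at 60 and
-    # 40 TH/s, the online pool is 0.00095 BTC split 0.00057 / 0.00038, and the remaining 0.00005 BTC is
-    # spread across offline workers (or folded back in by the final verification below when none exist).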
-
- # Distribute earnings based on hashrate proportion for online workers
- if total_effective_hashrate > 0:
- for worker in workers:
- if worker["status"] == "online":
- hashrate_proportion = worker["hashrate_3hr"] / total_effective_hashrate
- worker["earnings"] = round(online_earnings_pool * hashrate_proportion, 8)
-
- # Distribute minimal earnings to offline workers
- if offline_count > 0:
- offline_per_worker = offline_earnings_pool / offline_count
- for worker in workers:
- if worker["status"] == "offline":
- worker["earnings"] = round(offline_per_worker, 8)
-
- # Final verification - ensure total earnings match
- current_total_earnings = sum(w["earnings"] for w in workers)
- if abs(current_total_earnings - total_unpaid_earnings) > 0.00000001:
- # Adjust the first worker to account for any rounding errors
- adjustment = total_unpaid_earnings - current_total_earnings
- for worker in workers:
- if worker["status"] == "online":
- worker["earnings"] = round(worker["earnings"] + adjustment, 8)
- break
-
- return workers
-
-# Worker data for the dashboard, preserving the exact aggregate hashrate reported on the main page
-
-def get_workers_data(force_refresh=False):
- """Get worker data from Ocean.xyz with caching for better performance."""
- global worker_data_cache, last_worker_data_update
-
- current_time = time.time()
-
- # Return cached data if it's still fresh and not forced to refresh
- if not force_refresh and worker_data_cache and last_worker_data_update and \
- (current_time - last_worker_data_update) < WORKER_DATA_CACHE_TIMEOUT:
- logging.info("Using cached worker data")
- return worker_data_cache
-
- try:
- # If metrics aren't available yet, return default data
- if not cached_metrics:
- return generate_default_workers_data()
-
- # Check if we have workers_hashing information
- workers_count = cached_metrics.get("workers_hashing", 0)
- if workers_count <= 0:
- return generate_default_workers_data()
-
- # Get hashrate from cached metrics - using EXACT value
- # Store this ORIGINAL value to ensure it's never changed in calculations
- original_hashrate_3hr = float(cached_metrics.get("hashrate_3hr", 0) or 0)
- hashrate_unit = cached_metrics.get("hashrate_3hr_unit", "TH/s")
-
- # Generate worker data based on the number of active workers
- workers_data = generate_workers_data(workers_count, original_hashrate_3hr, hashrate_unit)
-
- # Calculate basic statistics
- workers_online = len([w for w in workers_data if w['status'] == 'online'])
- workers_offline = len(workers_data) - workers_online
-
-        # Use unpaid_earnings from the main dashboard rather than summing per-worker estimates
- unpaid_earnings = cached_metrics.get("unpaid_earnings", 0)
- # Handle case where unpaid_earnings might be a string
- if isinstance(unpaid_earnings, str):
- try:
- # Handle case where it might include "BTC" or other text
- unpaid_earnings = float(unpaid_earnings.split()[0].replace(',', ''))
- except (ValueError, IndexError):
- unpaid_earnings = 0
-
- # Use unpaid_earnings as total_earnings
- total_earnings = unpaid_earnings
-
- # Debug log
- logging.info(f"Using unpaid_earnings as total_earnings: {unpaid_earnings} BTC")
-
- avg_acceptance_rate = sum([float(w.get('acceptance_rate', 0) or 0) for w in workers_data]) / len(workers_data) if workers_data else 0
-
- # IMPORTANT: Use the EXACT original value for total_hashrate
- # Do NOT recalculate it from worker data
- total_hashrate = original_hashrate_3hr
-
- # Daily sats from main dashboard
- daily_sats = cached_metrics.get("daily_mined_sats", 0)
-
- # Create hashrate history based on arrow_history if available
- hashrate_history = []
- if cached_metrics.get("arrow_history") and cached_metrics["arrow_history"].get("hashrate_3hr"):
- hashrate_history = cached_metrics["arrow_history"]["hashrate_3hr"]
-
- result = {
- "workers": workers_data,
- "workers_total": len(workers_data),
- "workers_online": workers_online,
- "workers_offline": workers_offline,
- "total_hashrate": total_hashrate, # EXACT value from main dashboard
- "hashrate_unit": hashrate_unit,
- "total_earnings": total_earnings, # Now using unpaid_earnings
- "daily_sats": daily_sats,
- "avg_acceptance_rate": avg_acceptance_rate,
- "hashrate_history": hashrate_history,
- "timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
- }
-
- # Update cache
- worker_data_cache = result
- last_worker_data_update = current_time
-
- return result
- except Exception as e:
- logging.error(f"Error getting worker data: {e}")
- return generate_default_workers_data()
-
-# --- Time Endpoint for Fine Syncing ---
-@app.route("/api/time")
-def api_time():
- return jsonify({
- "server_timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat(),
- "server_start_time": SERVER_START_TIME.astimezone(ZoneInfo("America/Los_Angeles")).isoformat()
- })
-
-# --- Workers Dashboard Route and API ---
-@app.route("/workers")
-def workers_dashboard():
- """Serve the workers overview dashboard page."""
- current_time = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%Y-%m-%d %I:%M:%S %p")
-
- # Only get minimal worker stats for initial page load
- # Client-side JS will fetch the full data via API
- workers_data = get_workers_data()
-
- return render_template("workers.html",
- current_time=current_time,
- workers_total=workers_data.get('workers_total', 0),
- workers_online=workers_data.get('workers_online', 0),
- workers_offline=workers_data.get('workers_offline', 0),
- total_hashrate=workers_data.get('total_hashrate', 0),
- hashrate_unit=workers_data.get('hashrate_unit', 'TH/s'),
- total_earnings=workers_data.get('total_earnings', 0),
- daily_sats=workers_data.get('daily_sats', 0),
- avg_acceptance_rate=workers_data.get('avg_acceptance_rate', 0))
-
-@app.route("/api/workers")
-def api_workers():
- """API endpoint for worker data."""
- # Get the force_refresh parameter from the query string (default: False)
- force_refresh = request.args.get('force', 'false').lower() == 'true'
- return jsonify(get_workers_data(force_refresh=force_refresh))
-
-# --- Metrics Update Job ---
-def update_metrics_job(force=False):
- global cached_metrics, last_metrics_update_time, scheduler, scheduler_last_successful_run
-
- try:
- # Check scheduler health - enhanced logic to detect failed executors
- if not scheduler or not hasattr(scheduler, 'running'):
- logging.error("Scheduler object is invalid, attempting to recreate")
- with scheduler_recreate_lock:
- create_scheduler()
- return
-
- if not scheduler.running:
- logging.warning("Scheduler stopped unexpectedly, attempting to restart")
- try:
- scheduler.start()
- logging.info("Scheduler restarted successfully")
- except Exception as e:
- logging.error(f"Failed to restart scheduler: {e}")
- # More aggressive recovery - recreate scheduler entirely
- with scheduler_recreate_lock:
- create_scheduler()
- return
-
- # Test the scheduler's executor by checking its state
- try:
- # Check if any jobs exist and are scheduled
- jobs = scheduler.get_jobs()
- if not jobs:
- logging.error("No jobs found in scheduler - recreating")
- with scheduler_recreate_lock:
- create_scheduler()
- return
-
- # Check if the next run time is set for any job
- next_runs = [job.next_run_time for job in jobs]
- if not any(next_runs):
- logging.error("No jobs with next_run_time found - recreating scheduler")
- with scheduler_recreate_lock:
- create_scheduler()
- return
- except RuntimeError as e:
- # Properly handle the "cannot schedule new futures after shutdown" error
- if "cannot schedule new futures after shutdown" in str(e):
- logging.error("Detected dead executor, recreating scheduler")
- with scheduler_recreate_lock:
- create_scheduler()
- return
- except Exception as e:
- logging.error(f"Error checking scheduler state: {e}")
-
- # Skip update if the last one was too recent (prevents overlapping runs)
- # Unless force=True is specified
- current_time = time.time()
- if not force and last_metrics_update_time and (current_time - last_metrics_update_time < 30):
- logging.info("Skipping metrics update - previous update too recent")
- return
-
- # Set last update time to now
- last_metrics_update_time = current_time
-
- # Add timeout handling with a timer
- job_timeout = 45 # seconds
- job_successful = False
-
- def timeout_handler():
- if not job_successful:
- logging.error("Background job timed out after 45 seconds")
-
- # Set timeout timer
- timer = threading.Timer(job_timeout, timeout_handler)
- timer.daemon = True
- timer.start()
-
- try:
- # Correctly call the dashboard instance's fetch_metrics method
- metrics = dashboard.fetch_metrics()
- if metrics:
- cached_metrics = metrics
- logging.info("Background job: Metrics updated successfully")
- job_successful = True
-
- # Mark successful run time for watchdog
- scheduler_last_successful_run = time.time()
-
- persist_critical_state()
-
- # Periodically check and prune data to prevent memory growth
- if current_time % 300 < 60: # Every ~5 minutes
- prune_old_data()
-
- # Only save state to Redis on a similar schedule, not every update
- if current_time % 300 < 60: # Every ~5 minutes
- save_graph_state()
-
- # Periodic full memory cleanup (every 2 hours)
- if current_time % 7200 < 60: # Every ~2 hours
- logging.info("Performing full memory cleanup")
- gc.collect(generation=2) # Force full collection
-
-                # Worker data is built on demand by /api/workers rather than attached to cached_metrics here
- else:
- logging.error("Background job: Metrics update returned None")
- except Exception as e:
- logging.error(f"Background job: Unexpected error: {e}")
- import traceback
- logging.error(traceback.format_exc())
- log_memory_usage()
- finally:
- # Cancel timer in finally block to ensure it's always canceled
- timer.cancel()
- except Exception as e:
- logging.error(f"Background job: Unhandled exception: {e}")
- import traceback
- logging.error(traceback.format_exc())
-
-# --- SSE Endpoint with proper request context handling ---
-@app.route('/stream')
-def stream():
- # Important: Capture any request context information BEFORE the generator
- # This ensures we're not trying to access request outside its context
-
- def event_stream():
- global active_sse_connections, cached_metrics
- client_id = None
-
- try:
- # Check if we're at the connection limit
- with sse_connections_lock:
- if active_sse_connections >= MAX_SSE_CONNECTIONS:
- logging.warning(f"Connection limit reached ({MAX_SSE_CONNECTIONS}), refusing new SSE connection")
- yield f"data: {{\"error\": \"Too many connections, please try again later\", \"retry\": 5000}}\n\n"
- return
-
- active_sse_connections += 1
- client_id = f"client-{int(time.time() * 1000) % 10000}"
- logging.info(f"SSE {client_id}: Connection established (total: {active_sse_connections})")
-
- # Set a maximum connection time - increased to 15 minutes for better user experience
- end_time = time.time() + MAX_SSE_CONNECTION_TIME
- last_timestamp = None
-
- # Send initial data immediately to prevent delay in dashboard updates
- if cached_metrics:
- yield f"data: {json.dumps(cached_metrics)}\n\n"
- last_timestamp = cached_metrics.get("server_timestamp")
- else:
- # Send ping if no data available yet
- yield f"data: {{\"type\": \"ping\", \"client_id\": \"{client_id}\"}}\n\n"
-
- # Main event loop with improved error handling
- while time.time() < end_time:
- try:
- # Send data only if it's changed
- if cached_metrics and cached_metrics.get("server_timestamp") != last_timestamp:
- data = json.dumps(cached_metrics)
- last_timestamp = cached_metrics.get("server_timestamp")
- yield f"data: {data}\n\n"
-
- # Send regular pings about every 30 seconds to keep connection alive
- if int(time.time()) % 30 == 0:
- yield f"data: {{\"type\": \"ping\", \"time\": {int(time.time())}, \"connections\": {active_sse_connections}}}\n\n"
-
- # Sleep to reduce CPU usage
- time.sleep(1)
-
- # Warn client 60 seconds before timeout so client can prepare to reconnect
- remaining_time = end_time - time.time()
- if remaining_time < 60 and int(remaining_time) % 15 == 0: # Every 15 sec in last minute
- yield f"data: {{\"type\": \"timeout_warning\", \"remaining\": {int(remaining_time)}}}\n\n"
-
- except Exception as e:
- logging.error(f"SSE {client_id}: Error in stream: {e}")
- time.sleep(2) # Prevent tight error loops
-
- # Connection timeout reached - send a reconnect instruction to client
- logging.info(f"SSE {client_id}: Connection timeout reached ({MAX_SSE_CONNECTION_TIME}s)")
- yield f"data: {{\"type\": \"timeout\", \"message\": \"Connection timeout reached\", \"reconnect\": true}}\n\n"
-
- except GeneratorExit:
- # This is how we detect client disconnection
- logging.info(f"SSE {client_id}: Client disconnected (GeneratorExit)")
- # Don't yield here - just let the generator exit normally
-
- finally:
- # Always decrement the connection counter when done
- with sse_connections_lock:
- active_sse_connections = max(0, active_sse_connections - 1)
- logging.info(f"SSE {client_id}: Connection closed (remaining: {active_sse_connections})")
-
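-    # Client side (illustrative, not part of this file): the dashboard JS would open
-    # new EventSource('/stream') and handle onmessage, where each frame is either a full metrics
-    # snapshot or a small control object such as {"type": "ping"}, {"type": "timeout_warning"} or {"type": "timeout"}.
-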
- # Configure response with improved error handling
- try:
- response = Response(event_stream(), mimetype="text/event-stream")
- response.headers['Cache-Control'] = 'no-cache'
- response.headers['X-Accel-Buffering'] = 'no' # Disable nginx buffering
- response.headers['Access-Control-Allow-Origin'] = '*' # Allow CORS
- return response
- except Exception as e:
- logging.error(f"Error creating SSE response: {e}")
- return jsonify({"error": "Internal server error"}), 500
-
-# Duplicate stream endpoint for the dashboard path
-@app.route('/dashboard/stream')
-def dashboard_stream():
- """Duplicate of the stream endpoint for the dashboard route."""
- return stream()
-
-# --- SchedulerWatchdog to monitor and recover ---
-def scheduler_watchdog():
- """Periodically check if the scheduler is running and healthy"""
- global scheduler, scheduler_last_successful_run
-
- try:
- # If no successful run in past 2 minutes, consider the scheduler dead
- if (scheduler_last_successful_run is None or
- time.time() - scheduler_last_successful_run > 120):
- logging.warning("Scheduler watchdog: No successful runs detected in last 2 minutes")
-
- # Check if actual scheduler exists and is reported as running
- if not scheduler or not getattr(scheduler, 'running', False):
- logging.error("Scheduler watchdog: Scheduler appears to be dead, recreating")
-
- # Use the lock to avoid multiple threads recreating simultaneously
- with scheduler_recreate_lock:
- create_scheduler()
- except Exception as e:
- logging.error(f"Error in scheduler watchdog: {e}")
-
-# --- Create Scheduler ---
-def create_scheduler():
- """Create and configure a new scheduler instance with proper error handling."""
- try:
- # Stop existing scheduler if it exists
- global scheduler
- if 'scheduler' in globals() and scheduler:
- try:
- # Check if scheduler is running before attempting to shut it down
- if hasattr(scheduler, 'running') and scheduler.running:
- logging.info("Shutting down existing scheduler before creating a new one")
- scheduler.shutdown(wait=False)
- except Exception as e:
- logging.error(f"Error shutting down existing scheduler: {e}")
-
- # Create a new scheduler with more robust configuration
- new_scheduler = BackgroundScheduler(
- job_defaults={
- 'coalesce': True, # Combine multiple missed runs into a single one
- 'max_instances': 1, # Prevent job overlaps
- 'misfire_grace_time': 30 # Allow misfires up to 30 seconds
- }
- )
-
- # Add the update job
- new_scheduler.add_job(
- func=update_metrics_job,
- trigger="interval",
- seconds=60,
- id='update_metrics_job',
- replace_existing=True
- )
-
- # Add watchdog job - runs every 30 seconds to check scheduler health
- new_scheduler.add_job(
- func=scheduler_watchdog,
- trigger="interval",
- seconds=30,
- id='scheduler_watchdog',
- replace_existing=True
- )
-
- # Start the scheduler
- new_scheduler.start()
- logging.info("Scheduler created and started successfully")
- return new_scheduler
- except Exception as e:
- logging.error(f"Error creating scheduler: {e}")
- return None
-
-# --- Routes ---
-@app.route("/")
-def boot():
- """Serve the boot sequence page."""
- return render_template("boot.html", base_url=request.host_url.rstrip('/'))
-
-# --- Updated Dashboard Route ---
-@app.route("/dashboard")
-def dashboard():
- """Serve the main dashboard page."""
- global cached_metrics, last_metrics_update_time
-
- # Make sure we have metrics data before rendering the template
- if cached_metrics is None:
- # Force an immediate metrics fetch regardless of the time since last update
- logging.info("Dashboard accessed with no cached metrics - forcing immediate fetch")
- try:
- # Force update with the force parameter
- update_metrics_job(force=True)
- except Exception as e:
- logging.error(f"Error during forced metrics fetch: {e}")
-
- # If still None after our attempt, create default metrics
- if cached_metrics is None:
- default_metrics = {
- "server_timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat(),
- "server_start_time": SERVER_START_TIME.astimezone(ZoneInfo("America/Los_Angeles")).isoformat(),
- "hashrate_24hr": None,
- "hashrate_24hr_unit": "TH/s",
- "hashrate_3hr": None,
- "hashrate_3hr_unit": "TH/s",
- "hashrate_10min": None,
- "hashrate_10min_unit": "TH/s",
- "hashrate_60sec": None,
- "hashrate_60sec_unit": "TH/s",
- "pool_total_hashrate": None,
- "pool_total_hashrate_unit": "TH/s",
- "workers_hashing": 0,
- "total_last_share": None,
- "block_number": None,
- "btc_price": 0,
- "network_hashrate": 0,
- "difficulty": 0,
- "daily_revenue": 0,
- "daily_power_cost": 0,
- "daily_profit_usd": 0,
- "monthly_profit_usd": 0,
- "daily_mined_sats": 0,
- "monthly_mined_sats": 0,
- "unpaid_earnings": "0",
- "est_time_to_payout": None,
- "last_block_height": None,
- "last_block_time": None,
- "last_block_earnings": None,
- "blocks_found": "0",
- "estimated_earnings_per_day_sats": 0,
- "estimated_earnings_next_block_sats": 0,
- "estimated_rewards_in_window_sats": 0,
- "arrow_history": {}
- }
- logging.warning("Rendering dashboard with default metrics - no data available yet")
- current_time = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%Y-%m-%d %I:%M:%S %p")
- return render_template("index.html", metrics=default_metrics, current_time=current_time)
-
- # If we have metrics, use them
- current_time = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%Y-%m-%d %I:%M:%S %p")
- return render_template("index.html", metrics=cached_metrics, current_time=current_time)
-
-@app.route("/api/metrics")
-def api_metrics():
- if cached_metrics is None:
- update_metrics_job()
- return jsonify(cached_metrics)
-
-# Health check endpoint with detailed diagnostics
-@app.route("/api/health")
-def health_check():
- """Health check endpoint with enhanced system diagnostics."""
- # Calculate uptime
- uptime_seconds = (datetime.now(ZoneInfo("America/Los_Angeles")) - SERVER_START_TIME).total_seconds()
-
- # Get process memory usage
- try:
- process = psutil.Process(os.getpid())
- mem_info = process.memory_info()
- memory_usage_mb = mem_info.rss / 1024 / 1024
- memory_percent = process.memory_percent()
- except Exception as e:
- logging.error(f"Error getting memory usage: {e}")
- memory_usage_mb = 0
- memory_percent = 0
-
- # Check data freshness
- data_age = 0
- if cached_metrics and cached_metrics.get("server_timestamp"):
- try:
- last_update = datetime.fromisoformat(cached_metrics["server_timestamp"])
- data_age = (datetime.now(ZoneInfo("America/Los_Angeles")) - last_update).total_seconds()
- except Exception as e:
- logging.error(f"Error calculating data age: {e}")
-
- # Determine health status
- health_status = "healthy"
- if data_age > 300: # Data older than 5 minutes
- health_status = "degraded"
- if not cached_metrics:
- health_status = "unhealthy"
-
- # Build response with detailed diagnostics
- status = {
- "status": health_status,
- "uptime": uptime_seconds,
- "uptime_formatted": f"{int(uptime_seconds // 3600)}h {int((uptime_seconds % 3600) // 60)}m {int(uptime_seconds % 60)}s",
- "connections": active_sse_connections,
- "memory": {
- "usage_mb": round(memory_usage_mb, 2),
- "percent": round(memory_percent, 2)
- },
- "data": {
- "last_update": cached_metrics.get("server_timestamp") if cached_metrics else None,
- "age_seconds": int(data_age),
- "available": cached_metrics is not None
- },
- "scheduler": {
- "running": scheduler.running if hasattr(scheduler, "running") else False,
- "last_successful_run": scheduler_last_successful_run
- },
- "redis": {
- "connected": redis_client is not None
- },
- "timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
- }
-
- # Log health check if status is not healthy
- if health_status != "healthy":
- logging.warning(f"Health check returning {health_status} status: {status}")
-
- return jsonify(status)
-
-# Add enhanced scheduler health check endpoint
-@app.route("/api/scheduler-health")
-def scheduler_health():
- try:
- scheduler_status = {
- "running": scheduler.running if hasattr(scheduler, "running") else False,
- "job_count": len(scheduler.get_jobs()) if hasattr(scheduler, "get_jobs") else 0,
- "next_run": str(scheduler.get_jobs()[0].next_run_time) if hasattr(scheduler, "get_jobs") and scheduler.get_jobs() else None,
- "last_update": last_metrics_update_time,
- "time_since_update": time.time() - last_metrics_update_time if last_metrics_update_time else None,
- "last_successful_run": scheduler_last_successful_run,
- "time_since_successful": time.time() - scheduler_last_successful_run if scheduler_last_successful_run else None
- }
- return jsonify(scheduler_status)
- except Exception as e:
- return jsonify({"error": str(e)}), 500
-
-# Add a health check route that can attempt to fix the scheduler if needed
-@app.route("/api/fix-scheduler", methods=["POST"])
-def fix_scheduler():
- try:
- with scheduler_recreate_lock:
- new_scheduler = create_scheduler()
- if new_scheduler:
- global scheduler
- scheduler = new_scheduler
- return jsonify({"status": "success", "message": "Scheduler recreated successfully"})
- else:
- return jsonify({"status": "error", "message": "Failed to recreate scheduler"}), 500
- except Exception as e:
- return jsonify({"status": "error", "message": str(e)}), 500
-
-@app.errorhandler(404)
-def page_not_found(e):
- return render_template("error.html", message="Page not found."), 404
-
-@app.errorhandler(500)
-def internal_server_error(e):
- logging.error("Internal server error: %s", e)
- return render_template("error.html", message="Internal server error."), 500
-
-@app.route("/api/force-refresh", methods=["POST"])
-def force_refresh():
- """Emergency endpoint to force metrics refresh."""
- logging.warning("Emergency force-refresh requested")
- try:
- # Force fetch new metrics
- metrics = dashboard.fetch_metrics()
- if metrics:
- global cached_metrics, scheduler_last_successful_run
- cached_metrics = metrics
- scheduler_last_successful_run = time.time()
- logging.info(f"Force refresh successful, new timestamp: {metrics['server_timestamp']}")
- return jsonify({"status": "success", "message": "Metrics refreshed", "timestamp": metrics['server_timestamp']})
- else:
- return jsonify({"status": "error", "message": "Failed to fetch metrics"}), 500
- except Exception as e:
- logging.error(f"Force refresh error: {e}")
- return jsonify({"status": "error", "message": str(e)}), 500
-
-class RobustMiddleware:
- def __init__(self, app):
- self.app = app
-
- def __call__(self, environ, start_response):
- try:
- return self.app(environ, start_response)
- except Exception as e:
- logging.exception("Unhandled exception in WSGI app")
- start_response("500 Internal Server Error", [("Content-Type", "text/html")])
- return [b"
Internal Server Error "]
-
-app.wsgi_app = RobustMiddleware(app.wsgi_app)
-
-# Initialize the dashboard and background scheduler
-config = load_config()
-dashboard = MiningDashboardWeb(
- config.get("power_cost", 0.0),
- config.get("power_usage", 0.0),
- config.get("wallet")
-)
-
-# Initialize the scheduler using our new function
-scheduler = create_scheduler()
-
-# Graceful shutdown handler for clean termination
-def graceful_shutdown(signum, frame):
- """Handle shutdown signals gracefully"""
- logging.info(f"Received shutdown signal {signum}, shutting down gracefully")
-
- # Save state before shutting down
- save_graph_state()
-
- # Stop the scheduler
- if scheduler:
- try:
- scheduler.shutdown(wait=True) # wait for running jobs to complete
- logging.info("Scheduler shutdown complete")
- except Exception as e:
- logging.error(f"Error shutting down scheduler: {e}")
-
- # Log connection info before exit
- logging.info(f"Active SSE connections at shutdown: {active_sse_connections}")
-
- # Exit with success code
- sys.exit(0)
-
-# Register signal handlers
-signal.signal(signal.SIGTERM, graceful_shutdown)
-signal.signal(signal.SIGINT, graceful_shutdown)
-
-# Worker pre and post fork hooks to handle Gunicorn worker cycling
-def worker_exit(server, worker):
- """Handle worker shutdown gracefully"""
- logging.info("Worker exit detected, shutting down scheduler")
- if 'scheduler' in globals() and scheduler:
- try:
- scheduler.shutdown(wait=False)
- logging.info("Scheduler shutdown on worker exit")
- except Exception as e:
- logging.error(f"Error shutting down scheduler on worker exit: {e}")
-
-# Handle worker initialization
-def on_starting(server):
- """Initialize shared resources before workers start"""
- logging.info("Gunicorn server starting")
-
-# Confirm that the shutdown signal handlers are in place
-logging.info("Signal handlers registered for graceful shutdown")
-
-# --- Critical state recovery function ---
-def load_critical_state():
- """Recover critical state variables after a worker restart"""
- global cached_metrics, scheduler_last_successful_run, last_metrics_update_time
- if redis_client:
- try:
- state_json = redis_client.get("critical_state")
- if state_json:
- state = json.loads(state_json.decode('utf-8'))
- if state.get("last_successful_run"):
- scheduler_last_successful_run = state.get("last_successful_run")
- if state.get("last_update_time"):
- last_metrics_update_time = state.get("last_update_time")
- logging.info(f"Loaded critical state from Redis, last run: {scheduler_last_successful_run}")
-
- # We don't restore cached_metrics itself, as we'll fetch fresh data
- # Just note that we have state to recover from
- logging.info(f"Last metrics timestamp from Redis: {state.get('cached_metrics_timestamp')}")
- except Exception as e:
- logging.error(f"Error loading critical state: {e}")
-
-
-# Load critical state if available
-load_critical_state()
-
-# Run once at startup.
-update_metrics_job(force=True)
-
-if __name__ == "__main__":
- # When deploying with Gunicorn in Docker, run with --workers=1 --threads=8 to ensure global state is shared.
- app.run(host="0.0.0.0", port=5000, debug=False, use_reloader=False)
diff --git a/Custom-Ocean.xyz-Dashboard/LICENSE.md b/Custom-Ocean.xyz-Dashboard/LICENSE.md
deleted file mode 100644
index 2066b7c..0000000
--- a/Custom-Ocean.xyz-Dashboard/LICENSE.md
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2025 DJObleezy
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/Custom-Ocean.xyz-Dashboard/README.md b/Custom-Ocean.xyz-Dashboard/README.md
deleted file mode 100644
index 4d72786..0000000
--- a/Custom-Ocean.xyz-Dashboard/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-# Ocean.xyz Bitcoin Mining Dashboard
-
-## A Practical Monitoring Solution for Bitcoin Miners
-
-This open-source dashboard provides comprehensive monitoring for Ocean.xyz pool miners, offering real-time data on hashrate, profitability, and worker status. Designed to be resource-efficient and user-friendly, it helps miners maintain oversight of their operations.
-
----
-## Gallery:
-
-
-
----
-
-## Practical Mining Intelligence
-
-The dashboard aggregates essential metrics in one accessible interface:
-
-- **Profitability Analysis**: Monitor daily and monthly earnings in BTC and USD
-- **Worker Status**: Track online/offline status of mining equipment
-- **Payout Monitoring**: View unpaid balance and estimated time to next payout
-- **Network Metrics**: Stay informed of difficulty adjustments and network hashrate
-- **Cost Analysis**: Calculate profit margins based on power consumption
-
-## Key Features
-
-### Mining Performance Metrics
-- **Hashrate Visualization**: Clear graphical representation of hashrate trends
-- **Financial Calculations**: Automatic conversion between BTC and USD values
-- **Payout Estimation**: Projected time until minimum payout threshold is reached
-- **Network Intelligence**: Current Bitcoin price, difficulty, and total network hashrate
-
-### Worker Management
-- **Equipment Overview**: Consolidated view of all mining devices
-- **Status Monitoring**: Clear indicators for active and inactive devices
-- **Performance Data**: Individual hashrate, temperature, and acceptance rate metrics
-- **Filtering Options**: Sort and search by device type or operational status
-
-### Thoughtful Design Elements
-- **Retro Terminal Monitor**: A floating system monitor with classic design aesthetics
-- **Boot Sequence**: An engaging initialization sequence on startup
-- **Responsive Interface**: Adapts seamlessly to desktop and mobile devices
-
-## Installation Options
-
-### Standard Installation
-
-1. Download the latest release package
-2. Configure your mining parameters in `config.json` (see the example below):
- - Pool wallet address
- - Electricity cost ($/kWh)
- - System power consumption (watts)
-3. Launch the application using the included startup script
-4. Access the dashboard at `http://localhost:5000`
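-
-A minimal `config.json` can be created as shown below. The key names follow what the application reads at startup (`wallet`, `power_cost`, `power_usage`); the values are placeholders matching the Docker example and should be replaced with your own:
-
-```bash
-# Write a starter config.json - replace these placeholder values with your own
-cat > config.json <<'EOF'
-{
-  "wallet": "your-wallet-address",
-  "power_cost": 0.12,
-  "power_usage": 3450
-}
-EOF
-```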
-
-### Docker Installation
-
-For those preferring containerized deployment:
-
-```bash
-docker run -d -p 5000:5000 -e WALLET=your-wallet-address -e POWER_COST=0.12 -e POWER_USAGE=3450 yourusername/ocean-mining-dashboard
-```
-
-Then navigate to `http://localhost:5000` in your web browser.
-
-## Dashboard Components
-
-### Main Dashboard
-
-- Interactive hashrate visualization
-- Detailed profitability metrics
-- Network statistics
-- Current Bitcoin price
-- Balance and payment information
-
-### Workers Dashboard
-
-
-- Fleet summary with aggregate statistics
-- Individual worker performance metrics
-- Status indicators for each device
-- Flexible filtering and search functionality
-
-### Retro Terminal Monitor
-
-
-- Floating interface providing system statistics
-- Progress indicator for data refresh cycles
-- System uptime display
-- Minimizable design for unobtrusive monitoring
-- Thoughtful visual styling reminiscent of classic computer terminals
-
-## System Requirements
-
-The application is designed for efficient resource utilization:
-- Compatible with standard desktop and laptop computers
-- Modest CPU and memory requirements
-- Suitable for continuous operation
-- Cross-platform support for Windows, macOS, and Linux
-
-## Troubleshooting
-
-For optimal performance:
-
-1. Use the refresh function if data appears outdated
-2. Verify network connectivity for consistent updates
-3. Restart the application after configuration changes
-4. Access the health endpoint at `/api/health` for system status information
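-
-For example, you can query the health endpoint from the machine running the dashboard; it returns JSON covering status, uptime, memory usage, data freshness, and scheduler state:
-
-```bash
-# Quick liveness probe against the dashboard's health endpoint
-curl -s http://localhost:5000/api/health
-```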
-
-## Getting Started
-
-1. Download the latest release
-2. Configure with your mining information
-3. Launch the application to begin monitoring
-
-The dashboard requires only your Ocean.xyz mining wallet address for basic functionality.
-
----
-
-## Technical Foundation
-
-Built on Flask with Chart.js for visualization and Server-Sent Events for real-time updates, this dashboard retrieves data from Ocean.xyz and performs calculations based on current network metrics and your specified parameters.
-
-The application prioritizes stability and efficiency for reliable long-term operation. Source code is available for review and customization.
-
-## Acknowledgments
-
-- Ocean.xyz mining pool for their service
-- The open-source community for their contributions
-- Bitcoin protocol developers
-
-Available under the MIT License. This is an independent project not affiliated with Ocean.xyz.
diff --git a/Custom-Ocean.xyz-Dashboard/dockerfile b/Custom-Ocean.xyz-Dashboard/dockerfile
deleted file mode 100644
index 88fef62..0000000
--- a/Custom-Ocean.xyz-Dashboard/dockerfile
+++ /dev/null
@@ -1,55 +0,0 @@
-FROM python:3.9-slim
-
-WORKDIR /app
-
-# Install curl for healthcheck and other dependencies
-RUN apt-get update && \
- apt-get install -y --no-install-recommends curl && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
-# Install dependencies first to leverage Docker cache.
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-# Copy the entire application.
-COPY . .
-
-# Run the minifier to process HTML templates.
-RUN python minify.py
-
-# Create a non-root user first.
-RUN adduser --disabled-password --gecos '' appuser
-
-# Change ownership of the /app directory so that appuser can write files.
-RUN chown -R appuser:appuser /app
-
-# Create a directory for logs with proper permissions
-RUN mkdir -p /app/logs && chown -R appuser:appuser /app/logs
-
-USER appuser
-
-EXPOSE 5000
-
-# Add environment variables for app configuration
-ENV FLASK_ENV=production
-ENV PYTHONUNBUFFERED=1
-
-# Improve healthcheck reliability - use new health endpoint
-HEALTHCHECK --interval=15s --timeout=5s --start-period=30s --retries=3 \
- CMD curl -f http://localhost:5000/api/health || exit 1
-
-# Use Gunicorn as the production WSGI server with improved settings
-# For shared global state, we need to keep the single worker model but optimize other parameters
-CMD ["gunicorn", "-b", "0.0.0.0:5000", "App:app", \
- "--workers=1", \
- "--threads=12", \
- "--timeout=600", \
- "--keep-alive=5", \
- "--log-level=info", \
- "--access-logfile=-", \
- "--error-logfile=-", \
- "--log-file=-", \
- "--graceful-timeout=60", \
- "--worker-tmp-dir=/dev/shm"]
\ No newline at end of file
diff --git a/Custom-Ocean.xyz-Dashboard/minify.py b/Custom-Ocean.xyz-Dashboard/minify.py
deleted file mode 100644
index 41d9962..0000000
--- a/Custom-Ocean.xyz-Dashboard/minify.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-import htmlmin
-import logging
-
-# Set up logging
-logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
-
-TEMPLATES_DIR = "templates"
-HTML_FILES = ["index.html", "error.html"]
-
-def minify_html_file(file_path):
- """
- Minify an HTML file with error handling
- """
- try:
- with open(file_path, "r", encoding="utf-8") as f:
- content = f.read()
-
- # Check if file has content
- if not content.strip():
- logging.warning(f"File {file_path} is empty. Skipping.")
- return
-
- # Minify the content
- try:
- minified = htmlmin.minify(content,
- remove_comments=True,
- remove_empty_space=True,
- remove_all_empty_space=False,
- reduce_boolean_attributes=True)
-
- # Make sure minification worked and didn't remove everything
- if not minified.strip():
- logging.error(f"Minification of {file_path} resulted in empty content. Using original.")
- minified = content
-
- # Write back the minified content
- with open(file_path, "w", encoding="utf-8") as f:
- f.write(minified)
-
- logging.info(f"Minified {file_path}")
-
- except Exception as e:
- logging.error(f"Error minifying {file_path}: {e}")
-
- except Exception as e:
- logging.error(f"Error reading file {file_path}: {e}")
-
-def ensure_templates_dir():
- """
- Ensure templates directory exists
- """
- if not os.path.exists(TEMPLATES_DIR):
- try:
- os.makedirs(TEMPLATES_DIR)
- logging.info(f"Created templates directory: {TEMPLATES_DIR}")
- except Exception as e:
- logging.error(f"Error creating templates directory: {e}")
- return False
- return True
-
-if __name__ == "__main__":
- logging.info("Starting HTML minification process")
-
- if not ensure_templates_dir():
- logging.error("Templates directory does not exist and could not be created. Exiting.")
- exit(1)
-
- for filename in HTML_FILES:
- file_path = os.path.join(TEMPLATES_DIR, filename)
- if os.path.exists(file_path):
- minify_html_file(file_path)
- else:
- logging.warning(f"File {file_path} not found.")
-
- logging.info("HTML minification process completed")
\ No newline at end of file
diff --git a/Custom-Ocean.xyz-Dashboard/requirements.txt b/Custom-Ocean.xyz-Dashboard/requirements.txt
deleted file mode 100644
index a4f96ec..0000000
--- a/Custom-Ocean.xyz-Dashboard/requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-Flask==2.3.3
-requests==2.31.0
-beautifulsoup4==4.12.2
-Flask-Caching==2.1.0
-gunicorn==22.0.0
-htmlmin==0.1.12
-redis==5.0.1
-APScheduler==3.10.4
-psutil==5.9.5
\ No newline at end of file
diff --git a/Custom-Ocean.xyz-Dashboard/static/css/retro-refresh.css b/Custom-Ocean.xyz-Dashboard/static/css/retro-refresh.css
deleted file mode 100644
index 9e987ff..0000000
--- a/Custom-Ocean.xyz-Dashboard/static/css/retro-refresh.css
+++ /dev/null
@@ -1,369 +0,0 @@
-/* Retro Floating Refresh Bar Styles */
-:root {
- --terminal-bg: #000000;
- --terminal-border: #f7931a;
- --terminal-text: #f7931a;
- --terminal-glow: rgba(247, 147, 26, 0.7);
- --terminal-width: 300px;
-}
-
-/* Adjust width for desktop */
-@media (min-width: 768px) {
- :root {
- --terminal-width: 340px;
- }
-}
-
-/* Remove the existing refresh timer container styles */
-#refreshUptime {
- visibility: hidden !important;
- height: 0 !important;
- overflow: hidden !important;
- margin: 0 !important;
- padding: 0 !important;
-}
-
-/* Add padding to the bottom of the page to prevent floating bar from covering content
-body {
- padding-bottom: 100px !important;
-}
-*/
-/* Floating Retro Terminal Container */
-#retro-terminal-bar {
- position: fixed;
- bottom: 20px;
- left: 50%;
- transform: translateX(-50%);
- width: var(--terminal-width);
- background-color: var(--terminal-bg);
- border: 2px solid var(--terminal-border);
- /* box-shadow: 0 0 15px var(--terminal-glow); */
- z-index: 1000;
- font-family: 'VT323', monospace;
- overflow: hidden;
- padding: 5px;
-}
-
-/* Desktop positioning (bottom right) */
-@media (min-width: 768px) {
- #retro-terminal-bar {
- left: auto;
- right: 20px;
- transform: none;
- }
-}
-
-/* Terminal header with control buttons */
-/* Update the terminal title to match card headers */
-.terminal-header {
- display: flex;
- justify-content: space-between;
- align-items: center;
- margin-bottom: 5px;
- border-bottom: 1px solid var(--terminal-border);
- padding-bottom: 3px;
- background-color: #000; /* Match card header background */
-}
-
-.terminal-title {
- color: var(--primary-color);
- font-weight: bold;
- font-size: 1.1rem; /* Match card header font size */
- border-bottom: none;
- text-shadow: 0 0 5px var(--primary-color);
- animation: flicker 4s infinite; /* Add flicker animation from card headers */
- font-family: var(--header-font); /* Use the same font variable */
- padding: 0.3rem 0; /* Match card header padding */
- letter-spacing: 1px;
-}
-
-/* Make sure we're using the flicker animation defined in the main CSS */
-@keyframes flicker {
- 0% { opacity: 0.97; }
- 5% { opacity: 0.95; }
- 10% { opacity: 0.97; }
- 15% { opacity: 0.94; }
- 20% { opacity: 0.98; }
- 50% { opacity: 0.95; }
- 80% { opacity: 0.96; }
- 90% { opacity: 0.94; }
- 100% { opacity: 0.98; }
-}
-
-.terminal-controls {
- display: flex;
- gap: 5px;
-}
-
-.terminal-dot {
- width: 8px;
- height: 8px;
- border-radius: 50%;
- background-color: #555;
- transition: background-color 0.3s;
-}
-
-.terminal-dot:hover {
- background-color: #999;
- cursor: pointer;
-}
-
-.terminal-dot.minimize:hover {
- background-color: #ffcc00;
-}
-
-.terminal-dot.close:hover {
- background-color: #ff3b30;
-}
-
-/* Terminal content area */
-.terminal-content {
- position: relative;
- color: #ffffff;
- padding: 5px 0;
-}
-
-/* Scanline effect for authentic CRT look */
-.terminal-content::before {
- content: '';
- position: absolute;
- top: 0;
- left: 0;
- right: 0;
- bottom: 0;
- background: repeating-linear-gradient(
- 0deg,
- rgba(0, 0, 0, 0.15),
- rgba(0, 0, 0, 0.15) 1px,
- transparent 1px,
- transparent 2px
- );
- pointer-events: none;
- z-index: 1;
- animation: scanline-flicker 0.15s infinite;
-}
-
-@keyframes scanline-flicker {
- 0% { opacity: 1.0; }
- 50% { opacity: 0.98; }
- 100% { opacity: 1.0; }
-}
-
-/* Enhanced Progress Bar with tick marks */
-#retro-terminal-bar .bitcoin-progress-container {
- width: 100%;
- height: 20px;
- background-color: #111;
- border: 1px solid var(--terminal-border);
- margin-bottom: 10px;
- position: relative;
- overflow: hidden;
- box-shadow: inset 0 0 10px rgba(0, 0, 0, 0.8);
-}
-
-/* Tick marks on progress bar */
-.progress-ticks {
- position: absolute;
- top: 0;
- left: 0;
- width: 100%;
- height: 100%;
- display: flex;
- justify-content: space-between;
- padding: 0 5px;
- color: rgba(255, 255, 255, 0.6);
- font-size: 10px;
- pointer-events: none;
- z-index: 3;
-}
-
-.progress-ticks span {
- display: flex;
- align-items: flex-end;
- height: 100%;
- padding-bottom: 2px;
-}
-
-.tick-mark {
- position: absolute;
- top: 0;
- width: 1px;
- height: 5px;
- background-color: rgba(255, 255, 255, 0.4);
-}
-
-.tick-mark.major {
- height: 8px;
- background-color: rgba(255, 255, 255, 0.6);
-}
-
-/* The actual progress bar */
-#retro-terminal-bar #bitcoin-progress-inner {
- height: 100%;
- width: 0;
- background: linear-gradient(90deg, #f7931a, #ffa500);
- position: relative;
- transition: width 1s linear;
-}
-
-/* Position the original inner container correctly */
-#retro-terminal-bar #refreshContainer {
- display: block;
- width: 100%;
-}
-
-/* Blinking scan line animation */
-.scan-line {
- position: absolute;
- height: 2px;
- width: 100%;
- background-color: rgba(255, 255, 255, 0.7);
- animation: scan 3s linear infinite;
- box-shadow: 0 0 8px 1px rgba(255, 255, 255, 0.5);
- z-index: 2;
-}
-
-@keyframes scan {
- 0% { top: -2px; }
- 100% { top: 22px; }
-}
-
-/* Text styling */
-#retro-terminal-bar #progress-text {
- font-size: 16px;
- color: var(--terminal-text);
- text-shadow: 0 0 5px var(--terminal-text);
- margin-top: 5px;
- text-align: center;
- position: relative;
- z-index: 2;
-}
-
-#retro-terminal-bar #uptimeTimer {
- font-size: 16px;
- color: var(--terminal-text);
- text-shadow: 0 0 5px var(--terminal-text);
- text-align: center;
- position: relative;
- z-index: 2;
- border-top: 1px solid rgba(247, 147, 26, 0.3);
- padding-top: 5px;
- margin-top: 5px;
-}
-
-/* Terminal cursor */
-#retro-terminal-bar #terminal-cursor {
- display: inline-block;
- width: 8px;
- height: 14px;
- background-color: var(--terminal-text);
- margin-left: 2px;
- animation: blink 1s step-end infinite;
- box-shadow: 0 0 8px var(--terminal-text);
-}
-
-/* Glowing effect during the last few seconds */
-#retro-terminal-bar #bitcoin-progress-inner.glow-effect {
- box-shadow: 0 0 15px #f7931a, 0 0 25px #f7931a;
-}
-
-#retro-terminal-bar .waiting-for-update {
- animation: waitingPulse 2s infinite !important;
-}
-
-@keyframes waitingPulse {
- 0%, 100% { box-shadow: 0 0 10px #f7931a, 0 0 15px #f7931a; opacity: 0.8; }
- 50% { box-shadow: 0 0 20px #f7931a, 0 0 35px #f7931a; opacity: 1; }
-}
-
-/* Status indicators */
-.status-indicators {
- display: flex;
- justify-content: space-between;
- margin-bottom: 5px;
- font-size: 12px;
- color: #aaa;
-}
-
-.status-indicator {
- display: flex;
- align-items: center;
-}
-
-.status-dot {
- width: 6px;
- height: 6px;
- border-radius: 50%;
- margin-right: 4px;
-}
-
-.status-dot.connected {
- background-color: #32CD32;
- box-shadow: 0 0 5px #32CD32;
- animation: pulse 2s infinite;
-}
-
-@keyframes pulse {
- 0% { opacity: 0.8; }
- 50% { opacity: 1; }
- 100% { opacity: 0.8; }
-}
-
-/* Collapse/expand functionality */
-#retro-terminal-bar.collapsed .terminal-content {
- display: none;
-}
-
-#retro-terminal-bar.collapsed {
- width: 180px;
-}
-
-/* On desktop, move the collapsed bar to bottom right */
-@media (min-width: 768px) {
- #retro-terminal-bar.collapsed {
- right: 20px;
- transform: none;
- }
-}
-
-/* Show button */
-#show-terminal-button {
- position: fixed;
- bottom: 10px;
- right: 10px;
- z-index: 1000;
- background-color: #f7931a;
- color: #000;
- border: none;
- padding: 8px 12px;
- cursor: pointer;
- font-family: 'VT323', monospace;
- font-size: 14px;
- box-shadow: 0 0 10px rgba(247, 147, 26, 0.5);
-}
-
-#show-terminal-button:hover {
- background-color: #ffaa33;
-}
-
-/* Mobile responsiveness */
-@media (max-width: 576px) {
- #retro-terminal-bar {
- width: 280px;
- bottom: 10px;
- }
-
- .terminal-title {
- font-size: 14px;
- }
-
- .terminal-dot {
- width: 6px;
- height: 6px;
- }
-
- #show-terminal-button {
- padding: 6px 10px;
- font-size: 12px;
- }
-}
\ No newline at end of file
diff --git a/Custom-Ocean.xyz-Dashboard/static/js/main.js b/Custom-Ocean.xyz-Dashboard/static/js/main.js
deleted file mode 100644
index 3ddad24..0000000
--- a/Custom-Ocean.xyz-Dashboard/static/js/main.js
+++ /dev/null
@@ -1,801 +0,0 @@
-"use strict";
-
-// Global variables
-let previousMetrics = {};
-let persistentArrows = {};
-let serverTimeOffset = 0;
-let serverStartTime = null;
-let latestMetrics = null;
-let initialLoad = true;
-let trendData = [];
-let trendLabels = [];
-let trendChart = null;
-let connectionRetryCount = 0;
-let maxRetryCount = 10;
-let reconnectionDelay = 1000; // Start with 1 second
-let pingInterval = null;
-let lastPingTime = Date.now();
-let connectionLostTimeout = null;
-
-// Bitcoin-themed progress bar functionality
-let progressInterval;
-let currentProgress = 0;
-let lastUpdateTime = Date.now();
-let expectedUpdateInterval = 60000; // Expected server update interval (60 seconds)
-const PROGRESS_MAX = 60; // 60 seconds for a complete cycle
-
-// Initialize the progress bar and start the animation
-function initProgressBar() {
- // Clear any existing interval
- if (progressInterval) {
- clearInterval(progressInterval);
- }
-
- // Set last update time to now
- lastUpdateTime = Date.now();
-
- // Reset progress with initial offset
- currentProgress = 1; // Start at 1 instead of 0 for offset
- updateProgressBar(currentProgress);
-
- // Start the interval
- progressInterval = setInterval(function() {
- // Calculate elapsed time since last update
- const elapsedTime = Date.now() - lastUpdateTime;
-
- // Calculate progress percentage based on elapsed time with +1 second offset
- const secondsElapsed = Math.floor(elapsedTime / 1000) + 1; // Add 1 second offset
-
- // If we've gone past the expected update time
- if (secondsElapsed >= PROGRESS_MAX) {
- // Keep the progress bar full but show waiting state
- currentProgress = PROGRESS_MAX;
- } else {
- // Normal progress with offset
- currentProgress = secondsElapsed;
- }
-
- updateProgressBar(currentProgress);
- }, 1000);
-}
-
-// Update the progress bar display
-function updateProgressBar(seconds) {
- const progressPercent = (seconds / PROGRESS_MAX) * 100;
- $("#bitcoin-progress-inner").css("width", progressPercent + "%");
-
- // Add glowing effect when close to completion
- if (progressPercent > 80) {
- $("#bitcoin-progress-inner").addClass("glow-effect");
- } else {
- $("#bitcoin-progress-inner").removeClass("glow-effect");
- }
-
- // Update remaining seconds text - more precise calculation
- let remainingSeconds = PROGRESS_MAX - seconds;
-
- // When we're past the expected time, show "Waiting for update..."
- if (remainingSeconds <= 0) {
- $("#progress-text").text("Waiting for update...");
- $("#bitcoin-progress-inner").addClass("waiting-for-update");
- } else {
- $("#progress-text").text(remainingSeconds + "s to next update");
- $("#bitcoin-progress-inner").removeClass("waiting-for-update");
- }
-}
-
-// Register Chart.js annotation plugin if available
-if (window['chartjs-plugin-annotation']) {
- Chart.register(window['chartjs-plugin-annotation']);
-}
-
-// SSE Connection with Error Handling and Reconnection Logic
-function setupEventSource() {
- console.log("Setting up EventSource connection...");
-
- if (window.eventSource) {
- console.log("Closing existing EventSource connection");
- window.eventSource.close();
- window.eventSource = null;
- }
-
- // Always use absolute URL with origin to ensure it works from any path
- const baseUrl = window.location.origin;
- const streamUrl = `${baseUrl}/stream`;
-
- console.log("Current path:", window.location.pathname);
- console.log("Using stream URL:", streamUrl);
-
- // Clear any existing ping interval
- if (pingInterval) {
- clearInterval(pingInterval);
- pingInterval = null;
- }
-
- // Clear any connection lost timeout
- if (connectionLostTimeout) {
- clearTimeout(connectionLostTimeout);
- connectionLostTimeout = null;
- }
-
- try {
- const eventSource = new EventSource(streamUrl);
-
- eventSource.onopen = function(e) {
- console.log("EventSource connection opened successfully");
- connectionRetryCount = 0; // Reset retry count on successful connection
- reconnectionDelay = 1000; // Reset reconnection delay
- hideConnectionIssue();
-
- // Start ping interval to detect dead connections
- lastPingTime = Date.now();
- pingInterval = setInterval(function() {
- const now = Date.now();
- if (now - lastPingTime > 60000) { // 60 seconds without data
- console.warn("No data received for 60 seconds, reconnecting...");
- showConnectionIssue("Connection stalled");
- eventSource.close();
- setupEventSource();
- }
- }, 10000); // Check every 10 seconds
- };
-
- eventSource.onmessage = function(e) {
- console.log("SSE message received");
- lastPingTime = Date.now(); // Update ping time on any message
-
- try {
- const data = JSON.parse(e.data);
-
- // Handle different message types
- if (data.type === "ping") {
- console.log("Ping received:", data);
- // Update connection count if available
- if (data.connections !== undefined) {
- console.log(`Active connections: ${data.connections}`);
- }
- return;
- }
-
- if (data.type === "timeout_warning") {
- console.log(`Connection timeout warning: ${data.remaining}s remaining`);
- // If less than 30 seconds remaining, prepare for reconnection
- if (data.remaining < 30) {
- console.log("Preparing for reconnection due to upcoming timeout");
- }
- return;
- }
-
- if (data.type === "timeout") {
- console.log("Connection timeout from server:", data.message);
- eventSource.close();
- // If reconnect flag is true, reconnect immediately
- if (data.reconnect) {
- console.log("Server requested reconnection");
- setTimeout(setupEventSource, 500);
- } else {
- setupEventSource();
- }
- return;
- }
-
- if (data.error) {
- console.error("Server reported error:", data.error);
- showConnectionIssue(data.error);
-
- // If retry time provided, use it, otherwise use default
- const retryTime = data.retry || 5000;
- setTimeout(function() {
- manualRefresh();
- }, retryTime);
- return;
- }
-
- // Process regular data update
- latestMetrics = data;
- updateUI();
- hideConnectionIssue();
-
- // Also explicitly trigger a data refresh event
- $(document).trigger('dataRefreshed');
- } catch (err) {
- console.error("Error processing SSE data:", err);
- showConnectionIssue("Data processing error");
- }
- };
-
- eventSource.onerror = function(e) {
- console.error("SSE connection error", e);
- showConnectionIssue("Connection lost");
-
- eventSource.close();
-
- // Implement exponential backoff for reconnection
- connectionRetryCount++;
-
- if (connectionRetryCount > maxRetryCount) {
- console.log("Maximum retry attempts reached, switching to polling mode");
- if (pingInterval) {
- clearInterval(pingInterval);
- pingInterval = null;
- }
-
- // Switch to regular polling
- showConnectionIssue("Using polling mode");
- setInterval(manualRefresh, 30000); // Poll every 30 seconds
- manualRefresh(); // Do an immediate refresh
- return;
- }
-
- // Exponential backoff with jitter
- const jitter = Math.random() * 0.3 + 0.85; // 0.85-1.15
- reconnectionDelay = Math.min(30000, reconnectionDelay * 1.5 * jitter);
-
- console.log(`Reconnecting in ${(reconnectionDelay/1000).toFixed(1)} seconds... (attempt ${connectionRetryCount}/${maxRetryCount})`);
- setTimeout(setupEventSource, reconnectionDelay);
- };
-
- window.eventSource = eventSource;
- console.log("EventSource setup complete");
-
- // Set a timeout to detect if connection is established
- connectionLostTimeout = setTimeout(function() {
- if (eventSource.readyState !== 1) { // 1 = OPEN
- console.warn("Connection not established within timeout, switching to manual refresh");
- showConnectionIssue("Connection timeout");
- eventSource.close();
- manualRefresh();
- }
- }, 10000); // 10 seconds timeout to establish connection
-
- } catch (error) {
- console.error("Failed to create EventSource:", error);
- showConnectionIssue("Connection setup failed");
- setTimeout(setupEventSource, 5000); // Try again in 5 seconds
- }
-
- // Add page visibility change listener
- // This helps reconnect when user returns to the tab after it's been inactive
- document.removeEventListener("visibilitychange", handleVisibilityChange);
- document.addEventListener("visibilitychange", handleVisibilityChange);
-}
-
-// Handle page visibility changes
-function handleVisibilityChange() {
- if (!document.hidden) {
- console.log("Page became visible, checking connection");
- if (!window.eventSource || window.eventSource.readyState !== 1) {
- console.log("Connection not active, reestablishing");
- setupEventSource();
- }
- manualRefresh(); // Always refresh data when page becomes visible
- }
-}
-
-// Helper function to show connection issues to the user
-function showConnectionIssue(message) {
- let $connectionStatus = $("#connectionStatus");
- if (!$connectionStatus.length) {
- $("body").append('
');
- $connectionStatus = $("#connectionStatus");
- }
- $connectionStatus.html(` ${message}`).show();
-
- // Show manual refresh button when there are connection issues
- $("#refreshButton").show();
-}
-
-// Helper function to hide connection issue message
-function hideConnectionIssue() {
- $("#connectionStatus").hide();
- $("#refreshButton").hide();
-}
-
-// Improved manual refresh function as fallback
-function manualRefresh() {
- console.log("Manually refreshing data...");
-
- $.ajax({
- url: '/api/metrics',
- method: 'GET',
- dataType: 'json',
- timeout: 15000, // 15 second timeout
- success: function(data) {
- console.log("Manual refresh successful");
- lastPingTime = Date.now(); // Update ping time
- latestMetrics = data;
- updateUI();
- hideConnectionIssue();
-
- // Explicitly trigger data refresh event
- $(document).trigger('dataRefreshed');
- },
- error: function(xhr, status, error) {
- console.error("Manual refresh failed:", error);
- showConnectionIssue("Manual refresh failed");
-
- // Try again with exponential backoff
- const retryDelay = Math.min(30000, 1000 * Math.pow(1.5, Math.min(5, connectionRetryCount)));
- connectionRetryCount++;
- setTimeout(manualRefresh, retryDelay);
- }
- });
-}
-
-// Initialize Chart.js with Error Handling
-function initializeChart() {
- try {
- const ctx = document.getElementById('trendGraph').getContext('2d');
- if (!ctx) {
- console.error("Could not find trend graph canvas");
- return null;
- }
-
- if (!window.Chart) {
- console.error("Chart.js not loaded");
- return null;
- }
-
- // Check if Chart.js plugin is available
- const hasAnnotationPlugin = window['chartjs-plugin-annotation'] !== undefined;
-
- return new Chart(ctx, {
- type: 'line',
- data: {
- labels: [],
- datasets: [{
- label: '60s Hashrate Trend (TH/s)',
- data: [],
- borderColor: '#f7931a',
- backgroundColor: 'rgba(247,147,26,0.1)',
- fill: true,
- tension: 0.2,
- }]
- },
- options: {
- responsive: true,
- maintainAspectRatio: false,
- animation: {
- duration: 0 // Disable animations for better performance
- },
- scales: {
- x: { display: false },
- y: {
- ticks: { color: 'white' },
- grid: { color: '#333' }
- }
- },
- plugins: {
- legend: { display: false },
- annotation: hasAnnotationPlugin ? {
- annotations: {
- averageLine: {
- type: 'line',
- yMin: 0,
- yMax: 0,
- borderColor: '#f7931a',
- borderWidth: 2,
- borderDash: [6, 6],
- label: {
- enabled: true,
- content: '24hr Avg: 0 TH/s',
- backgroundColor: 'rgba(0,0,0,0.7)',
- color: '#f7931a',
- font: { weight: 'bold', size: 13 },
- position: 'start'
- }
- }
- }
- } : {}
- }
- }
- });
- } catch (error) {
- console.error("Error initializing chart:", error);
- return null;
- }
-}
-
-// Helper function to safely format numbers with commas
-function numberWithCommas(x) {
- if (x == null) return "N/A";
- return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
-}
-
-// Server time update via polling
-function updateServerTime() {
- $.ajax({
- url: "/api/time",
- method: "GET",
- timeout: 5000,
- success: function(data) {
- serverTimeOffset = new Date(data.server_timestamp).getTime() - Date.now();
- serverStartTime = new Date(data.server_start_time).getTime();
- },
- error: function(jqXHR, textStatus, errorThrown) {
- console.error("Error fetching server time:", textStatus, errorThrown);
- }
- });
-}
-
-// Update uptime display
-function updateUptime() {
- if (serverStartTime) {
- const currentServerTime = Date.now() + serverTimeOffset;
- const diff = currentServerTime - serverStartTime;
- const hours = Math.floor(diff / (1000 * 60 * 60));
- const minutes = Math.floor((diff % (1000 * 60 * 60)) / (1000 * 60));
- const seconds = Math.floor((diff % (1000 * 60)) / 1000);
- $("#uptimeTimer").html("Uptime: " + hours + "h " + minutes + "m " + seconds + "s");
- }
-}
-
-// Update UI indicators (arrows)
-function updateIndicators(newMetrics) {
- const keys = [
- "pool_total_hashrate", "hashrate_24hr", "hashrate_3hr", "hashrate_10min",
- "hashrate_60sec", "block_number", "btc_price", "network_hashrate",
- "difficulty", "daily_revenue", "daily_power_cost", "daily_profit_usd",
- "monthly_profit_usd", "daily_mined_sats", "monthly_mined_sats", "unpaid_earnings",
- "estimated_earnings_per_day_sats", "estimated_earnings_next_block_sats", "estimated_rewards_in_window_sats",
- "workers_hashing"
- ];
-
- keys.forEach(function(key) {
- const newVal = parseFloat(newMetrics[key]);
- if (isNaN(newVal)) return;
-
- const oldVal = parseFloat(previousMetrics[key]);
- if (!isNaN(oldVal)) {
- if (newVal > oldVal) {
- persistentArrows[key] = "↑";
- } else if (newVal < oldVal) {
- persistentArrows[key] = "↓";
- }
- } else {
- if (newMetrics.arrow_history && newMetrics.arrow_history[key] && newMetrics.arrow_history[key].length > 0) {
- const historyArr = newMetrics.arrow_history[key];
- for (let i = historyArr.length - 1; i >= 0; i--) {
- if (historyArr[i].arrow !== "") {
- if (historyArr[i].arrow === "↑") {
- persistentArrows[key] = "↑";
- } else if (historyArr[i].arrow === "↓") {
- persistentArrows[key] = "↓";
- }
- break;
- }
- }
- }
- }
-
- const indicator = document.getElementById("indicator_" + key);
- if (indicator) {
- indicator.innerHTML = persistentArrows[key] || "";
- }
- });
-
- previousMetrics = { ...newMetrics };
-}
-
-// Helper function to safely update element text content
-function updateElementText(elementId, text) {
- const element = document.getElementById(elementId);
- if (element) {
- element.textContent = text;
- }
-}
-
-// Helper function to safely update element HTML content
-function updateElementHTML(elementId, html) {
- const element = document.getElementById(elementId);
- if (element) {
- element.innerHTML = html;
- }
-}
-
-// Update workers_hashing value from metrics, but don't try to access worker details
-function updateWorkersCount() {
- if (latestMetrics && latestMetrics.workers_hashing !== undefined) {
- $("#workers_hashing").text(latestMetrics.workers_hashing || 0);
-
- // Update miner status with online/offline indicator based on worker count
- if (latestMetrics.workers_hashing > 0) {
- updateElementHTML("miner_status", "ONLINE ");
- } else {
- updateElementHTML("miner_status", "OFFLINE ");
- }
- }
-}
-
-// Check for block updates and show congratulatory messages
-function checkForBlockUpdates(data) {
- if (previousMetrics.last_block_height !== undefined &&
- data.last_block_height !== previousMetrics.last_block_height) {
- showCongrats("Congrats! New Block Found: " + data.last_block_height);
- }
-
- if (previousMetrics.blocks_found !== undefined &&
- data.blocks_found !== previousMetrics.blocks_found) {
- showCongrats("Congrats! Blocks Found updated: " + data.blocks_found);
- }
-}
-
-// Helper function to show congratulatory messages
-function showCongrats(message) {
- const $congrats = $("#congratsMessage");
- $congrats.text(message).fadeIn(500, function() {
- setTimeout(function() {
- $congrats.fadeOut(500);
- }, 3000);
- });
-}
-
-// Main UI update function
-function updateUI() {
- if (!latestMetrics) {
- console.warn("No metrics data available");
- return;
- }
-
- try {
- const data = latestMetrics;
-
- // If there's execution time data, log it
- if (data.execution_time) {
- console.log(`Server metrics fetch took ${data.execution_time.toFixed(2)}s`);
- }
-
- // Cache jQuery selectors for performance and use safe update methods
- updateElementText("pool_total_hashrate",
- (data.pool_total_hashrate != null ? data.pool_total_hashrate : "N/A") + " " +
- (data.pool_total_hashrate_unit ? data.pool_total_hashrate_unit.slice(0,-2).toUpperCase() + data.pool_total_hashrate_unit.slice(-2) : "")
- );
-
- updateElementText("hashrate_24hr",
- (data.hashrate_24hr != null ? data.hashrate_24hr : "N/A") + " " +
- (data.hashrate_24hr_unit ? data.hashrate_24hr_unit.slice(0,-2).toUpperCase() + data.hashrate_24hr_unit.slice(-2) : "")
- );
-
- updateElementText("hashrate_3hr",
- (data.hashrate_3hr != null ? data.hashrate_3hr : "N/A") + " " +
- (data.hashrate_3hr_unit ? data.hashrate_3hr_unit.slice(0,-2).toUpperCase() + data.hashrate_3hr_unit.slice(-2) : "")
- );
-
- updateElementText("hashrate_10min",
- (data.hashrate_10min != null ? data.hashrate_10min : "N/A") + " " +
- (data.hashrate_10min_unit ? data.hashrate_10min_unit.slice(0,-2).toUpperCase() + data.hashrate_10min_unit.slice(-2) : "")
- );
-
- updateElementText("hashrate_60sec",
- (data.hashrate_60sec != null ? data.hashrate_60sec : "N/A") + " " +
- (data.hashrate_60sec_unit ? data.hashrate_60sec_unit.slice(0,-2).toUpperCase() + data.hashrate_60sec_unit.slice(-2) : "")
- );
-
- updateElementText("block_number", numberWithCommas(data.block_number));
-
- updateElementText("btc_price",
- data.btc_price != null ? "$" + numberWithCommas(parseFloat(data.btc_price).toFixed(2)) : "N/A"
- );
-
- updateElementText("network_hashrate", numberWithCommas(Math.round(data.network_hashrate)) + " EH/s");
- updateElementText("difficulty", numberWithCommas(Math.round(data.difficulty)));
- updateElementText("daily_revenue", "$" + numberWithCommas(data.daily_revenue.toFixed(2)));
- updateElementText("daily_power_cost", "$" + numberWithCommas(data.daily_power_cost.toFixed(2)));
- updateElementText("daily_profit_usd", "$" + numberWithCommas(data.daily_profit_usd.toFixed(2)));
- updateElementText("monthly_profit_usd", "$" + numberWithCommas(data.monthly_profit_usd.toFixed(2)));
- updateElementText("daily_mined_sats", numberWithCommas(data.daily_mined_sats) + " sats");
- updateElementText("monthly_mined_sats", numberWithCommas(data.monthly_mined_sats) + " sats");
-
- // Update worker count from metrics (just the number, not full worker data)
- updateWorkersCount();
-
- updateElementText("unpaid_earnings", data.unpaid_earnings + " BTC");
-
- // Update payout estimation with color coding
- const payoutText = data.est_time_to_payout;
- updateElementText("est_time_to_payout", payoutText);
-
- if (payoutText && payoutText.toLowerCase().includes("next block")) {
- $("#est_time_to_payout").css({
- "color": "#32CD32",
- "animation": "glowPulse 1s infinite"
- });
- } else {
- const days = parseFloat(payoutText);
- if (!isNaN(days)) {
- if (days < 4) {
- $("#est_time_to_payout").css({"color": "#32CD32", "animation": "none"});
- } else if (days > 20) {
- $("#est_time_to_payout").css({"color": "red", "animation": "none"});
- } else {
- $("#est_time_to_payout").css({"color": "#ffd700", "animation": "none"});
- }
- } else {
- $("#est_time_to_payout").css({"color": "#ffd700", "animation": "none"});
- }
- }
-
- updateElementText("last_block_height", data.last_block_height || "");
- updateElementText("last_block_time", data.last_block_time || "");
- updateElementText("blocks_found", data.blocks_found || "0");
- updateElementText("last_share", data.total_last_share || "");
-
- // Update Estimated Earnings metrics
- updateElementText("estimated_earnings_per_day_sats", numberWithCommas(data.estimated_earnings_per_day_sats) + " sats");
- updateElementText("estimated_earnings_next_block_sats", numberWithCommas(data.estimated_earnings_next_block_sats) + " sats");
- updateElementText("estimated_rewards_in_window_sats", numberWithCommas(data.estimated_rewards_in_window_sats) + " sats");
-
- // Update last updated timestamp
- const now = new Date(Date.now() + serverTimeOffset);
- updateElementHTML("lastUpdated", "Last Updated: " + now.toLocaleString() + " ");
-
- // Update chart if it exists
- if (trendChart) {
- try {
- // Always update the 24hr average line even if we don't have data points yet
- const avg24hr = parseFloat(data.hashrate_24hr || 0);
- if (!isNaN(avg24hr) &&
- trendChart.options.plugins.annotation &&
- trendChart.options.plugins.annotation.annotations &&
- trendChart.options.plugins.annotation.annotations.averageLine) {
- const annotation = trendChart.options.plugins.annotation.annotations.averageLine;
- annotation.yMin = avg24hr;
- annotation.yMax = avg24hr;
- annotation.label.content = '24hr Avg: ' + avg24hr + ' TH/s';
- }
-
- // Update data points if we have any (removed minimum length requirement)
- if (data.arrow_history && data.arrow_history.hashrate_60sec) {
- const historyData = data.arrow_history.hashrate_60sec;
- if (historyData && historyData.length > 0) {
- console.log(`Updating chart with ${historyData.length} data points`);
- trendChart.data.labels = historyData.map(item => item.time);
- trendChart.data.datasets[0].data = historyData.map(item => {
- const val = parseFloat(item.value);
- return isNaN(val) ? 0 : val;
- });
- } else {
- console.log("No history data points available yet");
- }
- } else {
- console.log("No hashrate_60sec history available yet");
-
- // If there's no history data, create a starting point using current hashrate
- if (data.hashrate_60sec) {
- const currentTime = new Date().toLocaleTimeString('en-US', {hour12: false, hour: '2-digit', minute: '2-digit'});
- trendChart.data.labels = [currentTime];
- trendChart.data.datasets[0].data = [parseFloat(data.hashrate_60sec) || 0];
- console.log("Created initial data point with current hashrate");
- }
- }
-
- // Always update the chart, even if we just updated the average line
- trendChart.update('none');
- } catch (chartError) {
- console.error("Error updating chart:", chartError);
- }
- }
-
- // Update indicators and check for block updates
- updateIndicators(data);
- checkForBlockUpdates(data);
-
- } catch (error) {
- console.error("Error updating UI:", error);
- }
-}
-
-// Set up refresh synchronization
-function setupRefreshSync() {
- // Listen for the dataRefreshed event
- $(document).on('dataRefreshed', function() {
- // Broadcast to any other open tabs/pages that the data has been refreshed
- try {
- // Store the current timestamp to localStorage
- localStorage.setItem('dashboardRefreshTime', Date.now().toString());
-
- // Create a custom event that can be detected across tabs/pages
- localStorage.setItem('dashboardRefreshEvent', 'refresh-' + Date.now());
-
- console.log("Dashboard refresh synchronized");
- } catch (e) {
- console.error("Error synchronizing refresh:", e);
- }
- });
-}
-
-// Document ready initialization
-$(document).ready(function() {
- // Initialize the chart
- trendChart = initializeChart();
-
- // Initialize the progress bar
- initProgressBar();
-
- // Set up direct monitoring of data refreshes
- $(document).on('dataRefreshed', function() {
- console.log("Data refresh event detected, resetting progress bar");
- lastUpdateTime = Date.now();
- currentProgress = 0;
- updateProgressBar(currentProgress);
- });
-
- // Wrap the updateUI function to detect changes and trigger events
- const originalUpdateUI = updateUI;
- updateUI = function() {
- const previousMetricsTimestamp = latestMetrics ? latestMetrics.server_timestamp : null;
-
- // Call the original function
- originalUpdateUI.apply(this, arguments);
-
- // Check if we got new data by comparing timestamps
- if (latestMetrics && latestMetrics.server_timestamp !== previousMetricsTimestamp) {
- console.log("New data detected, triggering refresh event");
- $(document).trigger('dataRefreshed');
- }
- };
-
- // Set up event source for SSE
- setupEventSource();
-
- // Start server time polling
- updateServerTime();
- setInterval(updateServerTime, 30000);
-
- // Start uptime timer
- setInterval(updateUptime, 1000);
- updateUptime();
-
- // Set up refresh synchronization with workers page
- setupRefreshSync();
-
- // Add a manual refresh button for fallback
- $("body").append('Refresh Data ');
-
- $("#refreshButton").on("click", function() {
- $(this).text("Refreshing...");
- $(this).prop("disabled", true);
- manualRefresh();
- setTimeout(function() {
- $("#refreshButton").text("Refresh Data");
- $("#refreshButton").prop("disabled", false);
- }, 5000);
- });
-
- // Force a data refresh when the page loads
- manualRefresh();
-
- // Add emergency refresh button functionality
- $("#forceRefreshBtn").show().on("click", function() {
- $(this).text("Refreshing...");
- $(this).prop("disabled", true);
-
- $.ajax({
- url: '/api/force-refresh',
- method: 'POST',
- timeout: 15000,
- success: function(data) {
- console.log("Force refresh successful:", data);
- manualRefresh(); // Immediately get the new data
- $("#forceRefreshBtn").text("Force Refresh").prop("disabled", false);
- },
- error: function(xhr, status, error) {
- console.error("Force refresh failed:", error);
- $("#forceRefreshBtn").text("Force Refresh").prop("disabled", false);
- alert("Refresh failed: " + error);
- }
- });
- });
-
- // Add stale data detection
- setInterval(function() {
- if (latestMetrics && latestMetrics.server_timestamp) {
- const lastUpdate = new Date(latestMetrics.server_timestamp);
- const timeSinceUpdate = Math.floor((Date.now() - lastUpdate.getTime()) / 1000);
- if (timeSinceUpdate > 120) { // More than 2 minutes
- showConnectionIssue(`Data stale (${timeSinceUpdate}s old). Use Force Refresh.`);
- $("#forceRefreshBtn").show();
- }
- }
- }, 30000); // Check every 30 seconds
-});
diff --git a/Custom-Ocean.xyz-Dashboard/static/js/retro-refresh.js b/Custom-Ocean.xyz-Dashboard/static/js/retro-refresh.js
deleted file mode 100644
index af5b2e4..0000000
--- a/Custom-Ocean.xyz-Dashboard/static/js/retro-refresh.js
+++ /dev/null
@@ -1,238 +0,0 @@
-// This script integrates the retro floating refresh bar
-// with the existing dashboard and workers page functionality
-
-(function() {
- // Wait for DOM to be ready
- document.addEventListener('DOMContentLoaded', function() {
- // Create the retro terminal bar if it doesn't exist yet
- if (!document.getElementById('retro-terminal-bar')) {
- createRetroTerminalBar();
- }
-
- // Hide the original refresh container
- const originalRefreshUptime = document.getElementById('refreshUptime');
- if (originalRefreshUptime) {
- originalRefreshUptime.style.visibility = 'hidden';
- originalRefreshUptime.style.height = '0';
- originalRefreshUptime.style.overflow = 'hidden';
-
- // Important: We keep the original elements and just hide them
- // This ensures all existing JavaScript functions still work
- }
-
- // Add extra space at the bottom of the page to prevent the floating bar from covering content
- const extraSpace = document.createElement('div');
- extraSpace.style.height = '100px';
- document.body.appendChild(extraSpace);
- });
-
- // Function to create the retro terminal bar
- function createRetroTerminalBar() {
- // Get the HTML content from the shared CSS/HTML
- const html = `
- <div id="retro-terminal-bar">
-   <!-- Markup stripped in this diff: the refresh progress bar (#bitcoin-progress-inner),
-        the 0s/15s/30s/45s/60s tick labels, the #progress-text readout ("60s to next update"),
-        the #data-refresh-time clock, and the #uptimeTimer display ("Uptime: 0h 0m 0s"). -->
- </div>
- `;
-
- // Create a container for the HTML
- const container = document.createElement('div');
- container.innerHTML = html;
-
- // Append to the body
- document.body.appendChild(container.firstElementChild);
-
- // Start the clock update
- updateTerminalClock();
- setInterval(updateTerminalClock, 1000);
-
- // Check if terminal should be collapsed based on previous state
- const isCollapsed = localStorage.getItem('terminalCollapsed') === 'true';
- if (isCollapsed) {
- document.getElementById('retro-terminal-bar').classList.add('collapsed');
- }
- }
-
- // Function to update the terminal clock
- function updateTerminalClock() {
- const clockElement = document.getElementById('data-refresh-time');
- if (clockElement) {
- const now = new Date();
- const hours = String(now.getHours()).padStart(2, '0');
- const minutes = String(now.getMinutes()).padStart(2, '0');
- const seconds = String(now.getSeconds()).padStart(2, '0');
- clockElement.textContent = `${hours}:${minutes}:${seconds}`;
- }
- }
-
- // Expose these functions globally for the onclick handlers
- window.toggleTerminal = function() {
- const terminal = document.getElementById('retro-terminal-bar');
- terminal.classList.toggle('collapsed');
-
- // Store state in localStorage
- localStorage.setItem('terminalCollapsed', terminal.classList.contains('collapsed'));
- };
-
- window.hideTerminal = function() {
- document.getElementById('retro-terminal-bar').style.display = 'none';
-
- // Create a show button that appears at the bottom right
- const showButton = document.createElement('button');
- showButton.id = 'show-terminal-button';
- showButton.textContent = 'Show Monitor';
- showButton.style.position = 'fixed';
- showButton.style.bottom = '10px';
- showButton.style.right = '10px';
- showButton.style.zIndex = '1000';
- showButton.style.backgroundColor = '#f7931a';
- showButton.style.color = '#000';
- showButton.style.border = 'none';
- showButton.style.padding = '8px 12px';
- showButton.style.cursor = 'pointer';
- showButton.style.fontFamily = "'VT323', monospace";
- showButton.style.fontSize = '14px';
- showButton.onclick = function() {
- document.getElementById('retro-terminal-bar').style.display = 'block';
- this.remove();
- };
- document.body.appendChild(showButton);
- };
-
- // Redirect original progress bar updates to our new floating bar
- // This Observer will listen for changes to the original #bitcoin-progress-inner
- // and replicate them to our new floating bar version
- const initProgressObserver = function() {
- // Setup a MutationObserver to watch for style changes on the original progress bar
- const originalProgressBar = document.querySelector('#refreshUptime #bitcoin-progress-inner');
- const newProgressBar = document.querySelector('#retro-terminal-bar #bitcoin-progress-inner');
-
- if (originalProgressBar && newProgressBar) {
- const observer = new MutationObserver(function(mutations) {
- mutations.forEach(function(mutation) {
- if (mutation.attributeName === 'style') {
- // Get the width from the original progress bar
- const width = originalProgressBar.style.width;
- if (width) {
- // Apply it to our new progress bar
- newProgressBar.style.width = width;
-
- // Also copy any classes (like glow-effect)
- if (originalProgressBar.classList.contains('glow-effect') &&
- !newProgressBar.classList.contains('glow-effect')) {
- newProgressBar.classList.add('glow-effect');
- } else if (!originalProgressBar.classList.contains('glow-effect') &&
- newProgressBar.classList.contains('glow-effect')) {
- newProgressBar.classList.remove('glow-effect');
- }
-
- // Copy waiting-for-update class
- if (originalProgressBar.classList.contains('waiting-for-update') &&
- !newProgressBar.classList.contains('waiting-for-update')) {
- newProgressBar.classList.add('waiting-for-update');
- } else if (!originalProgressBar.classList.contains('waiting-for-update') &&
- newProgressBar.classList.contains('waiting-for-update')) {
- newProgressBar.classList.remove('waiting-for-update');
- }
- }
- }
- });
- });
-
- // Start observing
- observer.observe(originalProgressBar, { attributes: true });
- }
-
- // Also watch for changes to the progress text
- const originalProgressText = document.querySelector('#refreshUptime #progress-text');
- const newProgressText = document.querySelector('#retro-terminal-bar #progress-text');
-
- if (originalProgressText && newProgressText) {
- const textObserver = new MutationObserver(function(mutations) {
- mutations.forEach(function(mutation) {
- if (mutation.type === 'childList') {
- // Update the text in our new bar
- newProgressText.textContent = originalProgressText.textContent;
- }
- });
- });
-
- // Start observing
- textObserver.observe(originalProgressText, { childList: true, subtree: true });
- }
-
- // Watch for changes to the uptime timer
- const originalUptimeTimer = document.querySelector('#refreshUptime #uptimeTimer');
- const newUptimeTimer = document.querySelector('#retro-terminal-bar #uptimeTimer');
-
- if (originalUptimeTimer && newUptimeTimer) {
- const uptimeObserver = new MutationObserver(function(mutations) {
- mutations.forEach(function(mutation) {
- if (mutation.type === 'childList') {
- // Update the text in our new bar
- newUptimeTimer.innerHTML = originalUptimeTimer.innerHTML;
- }
- });
- });
-
- // Start observing
- uptimeObserver.observe(originalUptimeTimer, { childList: true, subtree: true });
- }
- };
-
- // Start the observer once the page is fully loaded
- window.addEventListener('load', function() {
- // Give a short delay to ensure all elements are rendered
- setTimeout(initProgressObserver, 500);
- });
-})();
\ No newline at end of file
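The deleted retro-refresh.js above keeps the floating bar in sync by watching the hidden original elements with MutationObserver rather than re-implementing the timers. A condensed sketch of that mirroring technique, with hypothetical selectors and assuming the same style/class conventions:

// Sketch: mirror one progress bar's inline width and marker classes onto another element.
// '#source-bar' and '#mirror-bar' are hypothetical selectors, not ids from this project.
function mirrorProgressBar(sourceSel, mirrorSel, classes = ['glow-effect', 'waiting-for-update']) {
    const source = document.querySelector(sourceSel);
    const mirror = document.querySelector(mirrorSel);
    if (!source || !mirror) return null;

    const sync = () => {
        mirror.style.width = source.style.width;  // copy the width
        classes.forEach(c => mirror.classList.toggle(c, source.classList.contains(c))); // copy marker classes
    };
    sync();                                       // copy the current state once
    const observer = new MutationObserver(sync);  // then follow every style/class change
    observer.observe(source, { attributes: true, attributeFilter: ['style', 'class'] });
    return observer;
}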
diff --git a/Custom-Ocean.xyz-Dashboard/static/js/workers.js b/Custom-Ocean.xyz-Dashboard/static/js/workers.js
deleted file mode 100644
index 7c4feec..0000000
--- a/Custom-Ocean.xyz-Dashboard/static/js/workers.js
+++ /dev/null
@@ -1,641 +0,0 @@
-"use strict";
-
-// Global variables for workers dashboard
-let workerData = null;
-let refreshTimer;
-let pageLoadTime = Date.now();
-let currentProgress = 0;
-const PROGRESS_MAX = 60; // 60 seconds for a complete cycle
-let lastUpdateTime = Date.now();
-let filterState = {
- currentFilter: 'all',
- searchTerm: ''
-};
-let miniChart = null;
-let connectionRetryCount = 0;
-
-// Server time variables for uptime calculation - synced with main dashboard
-let serverTimeOffset = 0;
-let serverStartTime = null;
-
-// New variable to track custom refresh timing
-let lastManualRefreshTime = 0;
-const MIN_REFRESH_INTERVAL = 10000; // Minimum 10 seconds between refreshes
-
-// Initialize the page
-$(document).ready(function() {
- // Set up initial UI
- initializePage();
-
- // Get server time for uptime calculation
- updateServerTime();
-
- // Set up refresh synchronization with main dashboard
- setupRefreshSync();
-
- // Fetch worker data immediately on page load
- fetchWorkerData();
-
- // Set up refresh timer
- setInterval(updateProgressBar, 1000);
-
- // Set up uptime timer - synced with main dashboard
- setInterval(updateUptime, 1000);
-
- // Start server time polling - same as main dashboard
- setInterval(updateServerTime, 30000);
-
- // Auto-refresh worker data - aligned with main dashboard if possible
- setInterval(function() {
- // Check if it's been at least PROGRESS_MAX seconds since last update
- const timeSinceLastUpdate = Date.now() - lastUpdateTime;
- if (timeSinceLastUpdate >= PROGRESS_MAX * 1000) {
- // Check if there was a recent manual refresh
- const timeSinceManualRefresh = Date.now() - lastManualRefreshTime;
- if (timeSinceManualRefresh >= MIN_REFRESH_INTERVAL) {
- console.log("Auto-refresh triggered after time interval");
- fetchWorkerData();
- }
- }
- }, 10000); // Check every 10 seconds to align better with main dashboard
-
- // Set up filter button click handlers
- $('.filter-button').click(function() {
- $('.filter-button').removeClass('active');
- $(this).addClass('active');
- filterState.currentFilter = $(this).data('filter');
- filterWorkers();
- });
-
- // Set up search input handler
- $('#worker-search').on('input', function() {
- filterState.searchTerm = $(this).val().toLowerCase();
- filterWorkers();
- });
-});
-
-// Set up refresh synchronization with main dashboard
-function setupRefreshSync() {
- // Listen for storage events (triggered by main dashboard)
- window.addEventListener('storage', function(event) {
- // Check if this is our dashboard refresh event
- if (event.key === 'dashboardRefreshEvent') {
- console.log("Detected dashboard refresh event");
-
- // Prevent too frequent refreshes
- const now = Date.now();
- const timeSinceLastRefresh = now - lastUpdateTime;
-
- if (timeSinceLastRefresh >= MIN_REFRESH_INTERVAL) {
- console.log("Syncing refresh with main dashboard");
- // Reset progress bar and immediately fetch
- resetProgressBar();
- // Refresh the worker data
- fetchWorkerData();
- } else {
- console.log("Skipping too-frequent refresh", timeSinceLastRefresh);
- // Just reset the progress bar to match main dashboard
- resetProgressBar();
- }
- }
- });
-
- // On page load, check if we should align with main dashboard timing
- try {
- const lastDashboardRefresh = localStorage.getItem('dashboardRefreshTime');
- if (lastDashboardRefresh) {
- const lastRefreshTime = parseInt(lastDashboardRefresh);
- const timeSinceLastDashboardRefresh = Date.now() - lastRefreshTime;
-
- // If main dashboard refreshed recently, adjust our timer
- if (timeSinceLastDashboardRefresh < PROGRESS_MAX * 1000) {
- console.log("Adjusting timer to align with main dashboard");
- currentProgress = Math.floor(timeSinceLastDashboardRefresh / 1000);
- updateProgressBar(currentProgress);
-
- // Calculate when next update will happen (roughly 60 seconds from last dashboard refresh)
- const timeUntilNextRefresh = (PROGRESS_MAX * 1000) - timeSinceLastDashboardRefresh;
-
- // Schedule a one-time check near the expected refresh time
- if (timeUntilNextRefresh > 0) {
- console.log(`Scheduling coordinated refresh in ${Math.floor(timeUntilNextRefresh/1000)} seconds`);
- setTimeout(function() {
- // Check if a refresh happened in the last few seconds via localStorage event
- const newLastRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
- const secondsSinceLastRefresh = (Date.now() - newLastRefresh) / 1000;
-
- // If dashboard hasn't refreshed in the last 5 seconds, do our own refresh
- if (secondsSinceLastRefresh > 5) {
- console.log("Coordinated refresh time reached, fetching data");
- fetchWorkerData();
- } else {
- console.log("Dashboard already refreshed recently, skipping coordinated refresh");
- }
- }, timeUntilNextRefresh);
- }
- }
- }
- } catch (e) {
- console.error("Error reading dashboard refresh time:", e);
- }
-
- // Check for dashboard refresh periodically
- setInterval(function() {
- try {
- const lastDashboardRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
- const now = Date.now();
- const timeSinceLastRefresh = (now - lastUpdateTime) / 1000;
- const timeSinceDashboardRefresh = (now - lastDashboardRefresh) / 1000;
-
- // If dashboard refreshed more recently than we did and we haven't refreshed in at least 10 seconds
- if (lastDashboardRefresh > lastUpdateTime && timeSinceLastRefresh > 10) {
- console.log("Catching up with dashboard refresh");
- resetProgressBar();
- fetchWorkerData();
- }
- } catch (e) {
- console.error("Error in periodic dashboard check:", e);
- }
- }, 5000); // Check every 5 seconds
-}
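setupRefreshSync works because writing to localStorage in one tab fires a storage event in every other same-origin tab; the workers page listens for the dashboardRefreshEvent key and throttles its reaction. A stripped-down sketch of both sides of that handshake, assuming the same key names:

// Publisher (main dashboard): record the refresh time and nudge other tabs.
function broadcastRefresh() {
    localStorage.setItem('dashboardRefreshTime', Date.now().toString());
    localStorage.setItem('dashboardRefreshEvent', 'refresh-' + Date.now()); // value must change for the event to fire
}

// Subscriber (workers page): react to the nudge, but never more than once per minIntervalMs.
function onDashboardRefresh(callback, minIntervalMs = 10000) {
    let last = 0;
    window.addEventListener('storage', (event) => {
        if (event.key !== 'dashboardRefreshEvent') return;
        const now = Date.now();
        if (now - last >= minIntervalMs) {
            last = now;
            callback();
        }
    });
}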
-
-// Server time update via polling - same as main.js
-function updateServerTime() {
- $.ajax({
- url: "/api/time",
- method: "GET",
- timeout: 5000,
- success: function(data) {
- serverTimeOffset = new Date(data.server_timestamp).getTime() - Date.now();
- serverStartTime = new Date(data.server_start_time).getTime();
- },
- error: function(jqXHR, textStatus, errorThrown) {
- console.error("Error fetching server time:", textStatus, errorThrown);
- }
- });
-}
-
-// Update uptime display - synced with main dashboard
-function updateUptime() {
- if (serverStartTime) {
- const currentServerTime = Date.now() + serverTimeOffset;
- const diff = currentServerTime - serverStartTime;
- const hours = Math.floor(diff / (1000 * 60 * 60));
- const minutes = Math.floor((diff % (1000 * 60 * 60)) / (1000 * 60));
- const seconds = Math.floor((diff % (1000 * 60)) / 1000);
- $("#uptimeTimer").html("Uptime: " + hours + "h " + minutes + "m " + seconds + "s");
- }
-}
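The uptime shown here is computed from the server's clock, not the browser's: the offset captured by updateServerTime is added to Date.now() and the reported start time is subtracted. The same arithmetic, isolated as a small helper:

// Sketch: format uptime from a server-time offset (ms) and a server start timestamp (ms).
function formatUptime(serverTimeOffset, serverStartTime, now = Date.now()) {
    const diff = (now + serverTimeOffset) - serverStartTime; // elapsed ms in server time
    const hours = Math.floor(diff / 3600000);
    const minutes = Math.floor((diff % 3600000) / 60000);
    const seconds = Math.floor((diff % 60000) / 1000);
    return `Uptime: ${hours}h ${minutes}m ${seconds}s`;
}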
-
-// Initialize page elements
-function initializePage() {
- // Initialize mini chart for total hashrate if the element exists
- if (document.getElementById('total-hashrate-chart')) {
- initializeMiniChart();
- }
-
- // Show loading state
- $('#worker-grid').html('Loading worker data...');
-
- // Add retry button (hidden by default)
- if (!$('#retry-button').length) {
- $('body').append('<button id="retry-button" style="display: none;">Retry Loading Data</button>');
-
- $('#retry-button').on('click', function() {
- $(this).text('Retrying...').prop('disabled', true);
- fetchWorkerData(true);
- setTimeout(() => {
- $('#retry-button').text('Retry Loading Data').prop('disabled', false);
- }, 3000);
- });
- }
-}
-
-// Fetch worker data from API
-function fetchWorkerData(forceRefresh = false) {
- // Track this as a manual refresh for throttling purposes
- lastManualRefreshTime = Date.now();
-
- $('#worker-grid').addClass('loading-fade');
-
- // Update progress bar to show data is being fetched
- resetProgressBar();
-
- // Choose API URL based on whether we're forcing a refresh
- const apiUrl = `/api/workers${forceRefresh ? '?force=true' : ''}`;
-
- $.ajax({
- url: apiUrl,
- method: 'GET',
- dataType: 'json',
- timeout: 15000, // 15 second timeout
- success: function(data) {
- workerData = data;
- lastUpdateTime = Date.now();
-
- // Update UI with new data
- updateWorkerGrid();
- updateSummaryStats();
- updateMiniChart();
- updateLastUpdated();
-
- // Hide retry button
- $('#retry-button').hide();
-
- // Reset connection retry count
- connectionRetryCount = 0;
-
- console.log("Worker data updated successfully");
- },
- error: function(xhr, status, error) {
- console.error("Error fetching worker data:", error);
-
- // Show error in worker grid
- $('#worker-grid').html(`
- Error loading worker data: ${error || 'Unknown error'}
- `);
-
- // Show retry button
- $('#retry-button').show();
-
- // Implement exponential backoff for automatic retry
- connectionRetryCount++;
- const delay = Math.min(30000, 1000 * Math.pow(1.5, Math.min(5, connectionRetryCount)));
- console.log(`Will retry in ${delay/1000} seconds (attempt ${connectionRetryCount})`);
-
- setTimeout(() => {
- fetchWorkerData(true); // Force refresh on retry
- }, delay);
- },
- complete: function() {
- $('#worker-grid').removeClass('loading-fade');
- }
- });
-}
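For reference, the retry delay above evaluates to min(30000, 1000 · 1.5^min(5, attempt)) milliseconds, i.e. roughly 1.5 s, 2.3 s, 3.4 s, 5.1 s, and then a steady 7.6 s from the fifth failure onward; the 30 s ceiling is never actually reached because the exponent is capped at 5. The schedule as a self-contained helper:

// Backoff schedule used after a failed worker-data fetch: exponent capped at 5, delay capped at 30 s.
function retryDelayMs(attempt) {
    return Math.min(30000, 1000 * Math.pow(1.5, Math.min(5, attempt)));
}
// retryDelayMs(1) === 1500, retryDelayMs(2) === 2250, retryDelayMs(5) === 7593.75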
-
-// Update the worker grid with data
-// UPDATED FUNCTION
-function updateWorkerGrid() {
- if (!workerData || !workerData.workers) {
- console.error("No worker data available");
- return;
- }
-
- const workerGrid = $('#worker-grid');
- workerGrid.empty();
-
- // Apply current filters before rendering
- const filteredWorkers = filterWorkersData(workerData.workers);
-
- if (filteredWorkers.length === 0) {
- workerGrid.html(`
- No workers match your filter criteria
- `);
- return;
- }
-
- // Calculate total unpaid earnings (from the dashboard)
- const totalUnpaidEarnings = workerData.total_earnings || 0;
-
- // Sum up hashrates of online workers to calculate share percentages
- const totalHashrate = workerData.workers
- .filter(w => w.status === 'online')
- .reduce((sum, w) => sum + parseFloat(w.hashrate_3hr || 0), 0);
-
- // Calculate share percentage for each worker
- const onlineWorkers = workerData.workers.filter(w => w.status === 'online');
- const offlineWorkers = workerData.workers.filter(w => w.status === 'offline');
-
- // Allocate 95% to online workers, 5% to offline workers
- const onlinePool = totalUnpaidEarnings * 0.95;
- const offlinePool = totalUnpaidEarnings * 0.05;
-
- // Generate worker cards
- filteredWorkers.forEach(worker => {
- // Calculate earnings share based on hashrate proportion
- let earningsDisplay = worker.earnings;
-
- // Explicitly recalculate earnings share for display consistency
- if (worker.status === 'online' && totalHashrate > 0) {
- const hashrateShare = parseFloat(worker.hashrate_3hr || 0) / totalHashrate;
- earningsDisplay = (onlinePool * hashrateShare).toFixed(8);
- } else if (worker.status === 'offline' && offlineWorkers.length > 0) {
- earningsDisplay = (offlinePool / offlineWorkers.length).toFixed(8);
- }
-
- // Create worker card
- const card = $('<div class="worker-card"></div>');
-
- // Add class based on status
- if (worker.status === 'online') {
- card.addClass('worker-card-online');
- } else {
- card.addClass('worker-card-offline');
- }
-
- // Add worker type badge
- card.append(`${worker.type}`);
-
- // Add worker name
- card.append(`${worker.name}`);
-
- // Add status badge
- if (worker.status === 'online') {
- card.append('ONLINE');
- } else {
- card.append('OFFLINE');
- }
-
- // Add hashrate bar
- const maxHashrate = 200; // TH/s - adjust based on your fleet
- const hashratePercent = Math.min(100, (worker.hashrate_3hr / maxHashrate) * 100);
- card.append(`
- Hashrate (3hr): ${worker.hashrate_3hr} ${worker.hashrate_3hr_unit}
- `);
-
- // Add additional stats - NOTE: Using recalculated earnings
- card.append(`
- Last Share: ${worker.last_share.split(' ')[1]}
- Earnings: ${earningsDisplay}
- Accept Rate: ${worker.acceptance_rate}%
- Temp: ${worker.temperature > 0 ? worker.temperature + '°C' : 'N/A'}
- `);
-
- // Add card to grid
- workerGrid.append(card);
- });
-
- // Verify the sum of displayed earnings equals the total
- console.log(`Total unpaid earnings: ${totalUnpaidEarnings} BTC`);
- console.log(`Sum of worker displayed earnings: ${
- filteredWorkers.reduce((sum, w) => {
- if (w.status === 'online' && totalHashrate > 0) {
- const hashrateShare = parseFloat(w.hashrate_3hr || 0) / totalHashrate;
- return sum + parseFloat((onlinePool * hashrateShare).toFixed(8));
- } else if (w.status === 'offline' && offlineWorkers.length > 0) {
- return sum + parseFloat((offlinePool / offlineWorkers.length).toFixed(8));
- }
- return sum;
- }, 0)
- } BTC`);
-}
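To make the split above concrete: 95% of the unpaid total is divided among online workers in proportion to their 3-hour hashrate, and the remaining 5% is shared equally by offline workers. The same allocation, isolated from the rendering code (a sketch using only the fields the function already reads):

// Sketch of the 95/5 earnings split used when rendering worker cards.
function allocateEarnings(workers, totalUnpaid) {
    const online = workers.filter(w => w.status === 'online');
    const offline = workers.filter(w => w.status === 'offline');
    const totalHashrate = online.reduce((sum, w) => sum + parseFloat(w.hashrate_3hr || 0), 0);
    const onlinePool = totalUnpaid * 0.95;
    const offlinePool = totalUnpaid * 0.05;

    return workers.map(w => {
        if (w.status === 'online' && totalHashrate > 0) {
            const share = parseFloat(w.hashrate_3hr || 0) / totalHashrate;
            return { name: w.name, earnings: (onlinePool * share).toFixed(8) };
        }
        if (w.status === 'offline' && offline.length > 0) {
            return { name: w.name, earnings: (offlinePool / offline.length).toFixed(8) };
        }
        return { name: w.name, earnings: w.earnings }; // fall back to the reported value
    });
}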
-
-// Filter worker data based on current filter state
-function filterWorkersData(workers) {
- if (!workers) return [];
-
- return workers.filter(worker => {
- const workerName = worker.name.toLowerCase();
- const isOnline = worker.status === 'online';
- const workerType = worker.type.toLowerCase();
-
- // Check if worker matches filter
- let matchesFilter = false;
- if (filterState.currentFilter === 'all') {
- matchesFilter = true;
- } else if (filterState.currentFilter === 'online' && isOnline) {
- matchesFilter = true;
- } else if (filterState.currentFilter === 'offline' && !isOnline) {
- matchesFilter = true;
- } else if (filterState.currentFilter === 'asic' && workerType === 'asic') {
- matchesFilter = true;
- } else if (filterState.currentFilter === 'fpga' && workerType === 'fpga') {
- matchesFilter = true;
- }
-
- // Check if worker matches search term
- const matchesSearch = workerName.includes(filterState.searchTerm);
-
- return matchesFilter && matchesSearch;
- });
-}
-
-// Apply filter to rendered worker cards
-function filterWorkers() {
- if (!workerData || !workerData.workers) return;
-
- // Re-render the worker grid with current filters
- updateWorkerGrid();
-}
-
-// Modified updateSummaryStats function for workers.js
-function updateSummaryStats() {
- if (!workerData) return;
-
- // Update worker counts
- $('#workers-count').text(workerData.workers_total || 0);
- $('#workers-online').text(workerData.workers_online || 0);
- $('#workers-offline').text(workerData.workers_offline || 0);
-
- // Update worker ring percentage
- const onlinePercent = workerData.workers_total > 0 ?
- workerData.workers_online / workerData.workers_total : 0;
- $('.worker-ring').css('--online-percent', onlinePercent);
-
- // IMPORTANT: Update total hashrate using EXACT format matching main dashboard
- // This ensures the displayed value matches exactly what's on the main page
- if (workerData.total_hashrate !== undefined) {
- // Format with exactly 1 decimal place - matches main dashboard format
- const formattedHashrate = Number(workerData.total_hashrate).toFixed(1);
- $('#total-hashrate').text(`${formattedHashrate} ${workerData.hashrate_unit || 'TH/s'}`);
- } else {
- $('#total-hashrate').text(`0.0 ${workerData.hashrate_unit || 'TH/s'}`);
- }
-
- // Update other summary stats
- $('#total-earnings').text(`${(workerData.total_earnings || 0).toFixed(8)} BTC`);
- $('#daily-sats').text(`${numberWithCommas(workerData.daily_sats || 0)} sats`);
- $('#avg-acceptance-rate').text(`${(workerData.avg_acceptance_rate || 0).toFixed(2)}%`);
-}
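Note that the online ratio is written into the CSS custom property --online-percent rather than a width, so the stylesheet (not shown in this diff) can draw the ring from it. A plain-DOM equivalent of that one line, shown only to illustrate the mechanism:

// Sketch: expose the online/total ratio to CSS as a custom property on each .worker-ring element.
function setOnlinePercent(online, total) {
    const ratio = total > 0 ? online / total : 0;
    document.querySelectorAll('.worker-ring').forEach(el => el.style.setProperty('--online-percent', String(ratio)));
    return ratio;
}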
-
-// Initialize mini chart
-function initializeMiniChart() {
- const ctx = document.getElementById('total-hashrate-chart').getContext('2d');
-
- // Generate some sample data to start
- const labels = Array(24).fill('').map((_, i) => i);
- const data = [750, 760, 755, 770, 780, 775, 760, 765, 770, 775, 780, 790, 785, 775, 770, 765, 780, 785, 775, 770, 775, 780, 775, 774.8];
-
- miniChart = new Chart(ctx, {
- type: 'line',
- data: {
- labels: labels,
- datasets: [{
- data: data,
- borderColor: '#1137F5',
- backgroundColor: 'rgba(57, 255, 20, 0.1)',
- fill: true,
- tension: 0.3,
- borderWidth: 1.5,
- pointRadius: 0
- }]
- },
- options: {
- responsive: true,
- maintainAspectRatio: false,
- scales: {
- x: { display: false },
- y: {
- display: false,
- min: Math.min(...data) * 0.9,
- max: Math.max(...data) * 1.1
- }
- },
- plugins: {
- legend: { display: false },
- tooltip: { enabled: false }
- },
- animation: false,
- elements: {
- line: {
- tension: 0.4
- }
- }
- }
- });
-}
-
-// Update mini chart with real data
-function updateMiniChart() {
- if (!miniChart || !workerData || !workerData.hashrate_history) return;
-
- // Extract hashrate data from history
- const historyData = workerData.hashrate_history;
- if (!historyData || historyData.length === 0) return;
-
- // Get the values for the chart
- const values = historyData.map(item => parseFloat(item.value) || 0);
- const labels = historyData.map(item => item.time);
-
- // Update chart data
- miniChart.data.labels = labels;
- miniChart.data.datasets[0].data = values;
-
- // Update y-axis range
- const min = Math.min(...values);
- const max = Math.max(...values);
- miniChart.options.scales.y.min = min * 0.9;
- miniChart.options.scales.y.max = max * 1.1;
-
- // Update the chart
- miniChart.update('none');
-}
-
-// Update progress bar
-function updateProgressBar() {
- if (currentProgress < PROGRESS_MAX) {
- currentProgress++;
- const progressPercent = (currentProgress / PROGRESS_MAX) * 100;
- $("#bitcoin-progress-inner").css("width", progressPercent + "%");
-
- // Add glowing effect when close to completion
- if (progressPercent > 80) {
- $("#bitcoin-progress-inner").addClass("glow-effect");
- } else {
- $("#bitcoin-progress-inner").removeClass("glow-effect");
- }
-
- // Update remaining seconds text
- let remainingSeconds = PROGRESS_MAX - currentProgress;
- if (remainingSeconds <= 0) {
- $("#progress-text").text("Waiting for update...");
- $("#bitcoin-progress-inner").addClass("waiting-for-update");
- } else {
- $("#progress-text").text(remainingSeconds + "s to next update");
- $("#bitcoin-progress-inner").removeClass("waiting-for-update");
- }
-
- // Check for main dashboard refresh near the end to ensure sync
- if (currentProgress >= 55) { // When we're getting close to refresh time
- try {
- const lastDashboardRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
- const secondsSinceDashboardRefresh = (Date.now() - lastDashboardRefresh) / 1000;
-
- // If main dashboard just refreshed (within last 5 seconds)
- if (secondsSinceDashboardRefresh <= 5) {
- console.log("Detected recent dashboard refresh, syncing now");
- resetProgressBar();
- fetchWorkerData();
- return;
- }
- } catch (e) {
- console.error("Error checking dashboard refresh status:", e);
- }
- }
- } else {
- // Reset progress bar if it's time to refresh
- // But first check if the main dashboard refreshed recently
- try {
- const lastDashboardRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
- const secondsSinceDashboardRefresh = (Date.now() - lastDashboardRefresh) / 1000;
-
- // If dashboard refreshed in the last 10 seconds, wait for it instead of refreshing ourselves
- if (secondsSinceDashboardRefresh < 10) {
- console.log("Waiting for dashboard refresh event instead of refreshing independently");
- return;
- }
- } catch (e) {
- console.error("Error checking dashboard refresh status:", e);
- }
-
- // If main dashboard hasn't refreshed recently, do our own refresh
- if (Date.now() - lastUpdateTime > PROGRESS_MAX * 1000) {
- console.log("Progress bar expired, fetching data");
- fetchWorkerData();
- }
- }
-}
-
-// Reset progress bar
-function resetProgressBar() {
- currentProgress = 0;
- $("#bitcoin-progress-inner").css("width", "0%");
- $("#bitcoin-progress-inner").removeClass("glow-effect");
- $("#bitcoin-progress-inner").removeClass("waiting-for-update");
- $("#progress-text").text(PROGRESS_MAX + "s to next update");
-}
-
-// Update the last updated timestamp
-function updateLastUpdated() {
- if (!workerData || !workerData.timestamp) return;
-
- try {
- const timestamp = new Date(workerData.timestamp);
- $("#lastUpdated").html("Last Updated: " +
- timestamp.toLocaleString() + " ");
- } catch (e) {
- console.error("Error formatting timestamp:", e);
- }
-}
-
-// Format numbers with commas
-function numberWithCommas(x) {
- if (x == null) return "N/A";
- return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
-}
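The regex in numberWithCommas inserts a comma at every non-boundary position that is followed by a complete group of three digits (lookahead (?=(\d{3})+(?!\d))). That is exactly right for the integer sat counts passed to it, but a value with a long decimal part would get commas there too, so keep the inputs integral. Quick behaviour checks:

// Usage notes for numberWithCommas (integers in, grouped string out).
console.assert(numberWithCommas(1234567) === "1,234,567");
console.assert(numberWithCommas(0) === "0");
console.assert(numberWithCommas(null) === "N/A");
// numberWithCommas(1234.5678) would yield "1,234.5,678", so format decimals separately.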
diff --git a/Custom-Ocean.xyz-Dashboard/templates/boot.html b/Custom-Ocean.xyz-Dashboard/templates/boot.html
deleted file mode 100644
index a03b35d..0000000
--- a/Custom-Ocean.xyz-Dashboard/templates/boot.html
+++ /dev/null
@@ -1,563 +0,0 @@
-
-
-
-
-
- Ocean.xyz Pool Miner - Initializing...
-
-
-
-
- SKIP
-
- Loading mining data...
-
-██████╗ ████████╗ ██████╗ ██████╗ ███████╗
-██╔══██╗╚══██╔══╝██╔════╝ ██╔═══██╗██╔════╝
-██████╔╝ ██║ ██║ ██║ ██║███████╗
-██╔══██╗ ██║ ██║ ██║ ██║╚════██║
-██████╔╝ ██║ ╚██████╗ ╚██████╔╝███████║
-╚═════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝
- v.21
-
-
-
-
-
- Initialize mining dashboard? [Y/N]:
-
diff --git a/Custom-Ocean.xyz-Dashboard/templates/error.html b/Custom-Ocean.xyz-Dashboard/templates/error.html
deleted file mode 100644
index 732024b..0000000
--- a/Custom-Ocean.xyz-Dashboard/templates/error.html
+++ /dev/null
@@ -1,161 +0,0 @@
-
- Error - Mining Dashboard
-
diff --git a/Custom-Ocean.xyz-Dashboard/templates/index.html b/Custom-Ocean.xyz-Dashboard/templates/index.html
deleted file mode 100644
index a96c0ef..0000000
--- a/Custom-Ocean.xyz-Dashboard/templates/index.html
+++ /dev/null
@@ -1,1071 +0,0 @@
-
- Ocean.xyz Pool Mining Dashboard v 0.2
-
Made by @DJO₿leezy
-
-
-
Last Updated: {{ current_time }}
-
- Status:
-
- {% if metrics and metrics.workers_hashing and metrics.workers_hashing > 0 %}
- ONLINE
- {% else %}
- OFFLINE
- {% endif %}
-
-
-
- Workers Hashing:
- {{ metrics.workers_hashing or 0 }}
-
-
-
- Last Share:
- {{ metrics.total_last_share or "N/A" }}
-
-
- Pool Total Hashrate:
-
- {% if metrics and metrics.pool_total_hashrate and metrics.pool_total_hashrate_unit %}
- {{ metrics.pool_total_hashrate }} {{ metrics.pool_total_hashrate_unit[:-2]|upper ~ metrics.pool_total_hashrate_unit[-2:] }}
- {% else %}
- N/A
- {% endif %}
-
-
-
-
-
- 24hr Avg Hashrate:
-
- {% if metrics and metrics.hashrate_24hr %}
- {{ metrics.hashrate_24hr }}
- {% if metrics.hashrate_24hr_unit %}
- {{ metrics.hashrate_24hr_unit[:-2]|upper ~ metrics.hashrate_24hr_unit[-2:] }}
- {% else %}
- TH/s
- {% endif %}
- {% else %}
- N/A
- {% endif %}
-
-
-
-
- 3hr Avg Hashrate:
-
- {% if metrics and metrics.hashrate_3hr %}
- {{ metrics.hashrate_3hr }}
- {% if metrics.hashrate_3hr_unit %}
- {{ metrics.hashrate_3hr_unit[:-2]|upper ~ metrics.hashrate_3hr_unit[-2:] }}
- {% else %}
- TH/s
- {% endif %}
- {% else %}
- N/A
- {% endif %}
-
-
-
-
- 10min Avg Hashrate:
-
- {% if metrics and metrics.hashrate_10min %}
- {{ metrics.hashrate_10min }}
- {% if metrics.hashrate_10min_unit %}
- {{ metrics.hashrate_10min_unit[:-2]|upper ~ metrics.hashrate_10min_unit[-2:] }}
- {% else %}
- TH/s
- {% endif %}
- {% else %}
- N/A
- {% endif %}
-
-
-
-
- 60sec Avg Hashrate:
-
- {% if metrics and metrics.hashrate_60sec %}
- {{ metrics.hashrate_60sec }}
- {% if metrics.hashrate_60sec_unit %}
- {{ metrics.hashrate_60sec_unit[:-2]|upper ~ metrics.hashrate_60sec_unit[-2:] }}
- {% else %}
- TH/s
- {% endif %}
- {% else %}
- N/A
- {% endif %}
-
-
- Block Number:
-
- {% if metrics and metrics.block_number %}
- {{ metrics.block_number|commafy }}
- {% else %}
- N/A
- {% endif %}
-
-
-
-
- BTC Price:
-
- {% if metrics and metrics.btc_price %}
- ${{ "%.2f"|format(metrics.btc_price) }}
- {% else %}
- $0.00
- {% endif %}
-
-
-
-
- Network Hashrate:
-
- {% if metrics and metrics.network_hashrate %}
- {{ metrics.network_hashrate|round|commafy }} EH/s
- {% else %}
- N/A
- {% endif %}
-
-
-
-
- Difficulty:
-
- {% if metrics and metrics.difficulty %}
- {{ metrics.difficulty|round|commafy }}
- {% else %}
- N/A
- {% endif %}
-
-
- Daily Mined (Net):
-
- {% if metrics and metrics.daily_mined_sats %}
- {{ metrics.daily_mined_sats|commafy }} sats
- {% else %}
- 0 sats
- {% endif %}
-
-
-
-
- Monthly Mined (Net):
-
- {% if metrics and metrics.monthly_mined_sats %}
- {{ metrics.monthly_mined_sats|commafy }} sats
- {% else %}
- 0 sats
- {% endif %}
-
-
-
-
- Est. Earnings/Day:
-
- {% if metrics and metrics.estimated_earnings_per_day_sats %}
- {{ metrics.estimated_earnings_per_day_sats|commafy }} sats
- {% else %}
- 0 sats
- {% endif %}
-
-
-
-
- Est. Earnings/Block:
-
- {% if metrics and metrics.estimated_earnings_next_block_sats %}
- {{ metrics.estimated_earnings_next_block_sats|commafy }} sats
- {% else %}
- 0 sats
- {% endif %}
-
-
-
-
- Est. Rewards in Window:
-
- {% if metrics and metrics.estimated_rewards_in_window_sats %}
- {{ metrics.estimated_rewards_in_window_sats|commafy }} sats
- {% else %}
- 0 sats
- {% endif %}
-
- Daily Revenue:
-
- {% if metrics and metrics.daily_revenue is defined and metrics.daily_revenue is not none %}
- ${{ "%.2f"|format(metrics.daily_revenue) }}
- {% else %}
- $0.00
- {% endif %}
-
-
-
-
- Daily Power Cost:
-
- {% if metrics and metrics.daily_power_cost is defined and metrics.daily_power_cost is not none %}
- ${{ "%.2f"|format(metrics.daily_power_cost) }}
- {% else %}
- $0.00
- {% endif %}
-
-
-
-
- Daily Profit (USD):
-
- {% if metrics and metrics.daily_profit_usd is defined and metrics.daily_profit_usd is not none %}
- ${{ "%.2f"|format(metrics.daily_profit_usd) }}
- {% else %}
- $0.00
- {% endif %}
-
-
-
-
- Monthly Profit (USD):
-
- {% if metrics and metrics.monthly_profit_usd is defined and metrics.monthly_profit_usd is not none %}
- ${{ "%.2f"|format(metrics.monthly_profit_usd) }}
- {% else %}
- $0.00
- {% endif %}
-
- Unpaid Earnings:
-
- {% if metrics and metrics.unpaid_earnings %}
- {{ metrics.unpaid_earnings }} BTC
- {% else %}
- 0 BTC
- {% endif %}
-
-
-
-
- Last Block:
-
- {{ metrics.last_block_height if metrics and metrics.last_block_height else "N/A" }}
-
- —
-
- {{ metrics.last_block_time if metrics and metrics.last_block_time else "N/A" }}
-
- —
-
- {% if metrics and metrics.last_block_earnings %}
- +{{ metrics.last_block_earnings|int|commafy }} sats
- {% else %}
- +0 sats
- {% endif %}
-
-
-
-
- Est. Time to Payout:
-
- {{ metrics.est_time_to_payout if metrics and metrics.est_time_to_payout else "N/A" }}
-
-
-
-
- Blocks Found:
-
- {{ metrics.blocks_found if metrics and metrics.blocks_found else "0" }}
-
-
diff --git a/Custom-Ocean.xyz-Dashboard/templates/workers.html b/Custom-Ocean.xyz-Dashboard/templates/workers.html
deleted file mode 100644
index 754be23..0000000
--- a/Custom-Ocean.xyz-Dashboard/templates/workers.html
+++ /dev/null
@@ -1,941 +0,0 @@
-
- Workers Overview - Ocean.xyz Pool Mining Dashboard v 0.2
-
- {{ workers_total }}
-
-
-
Workers
-
- {{ workers_online }} /
- {{ workers_offline }}
-
-
-
-
-
- {% if total_hashrate is defined %}
- {{ "%.1f"|format(total_hashrate) }} {{ hashrate_unit }}
- {% else %}
- N/A
- {% endif %}
-
-
Total Hashrate
-
-
-
-
-
-
-
- {% if total_earnings is defined %}
- {{ "%.8f"|format(total_earnings) }} BTC
- {% else %}
- N/A
- {% endif %}
-
-
Lifetime Earnings
-
-
-
-
- {% if daily_sats is defined %}
- {{ daily_sats|commafy }} sats
- {% else %}
- N/A
- {% endif %}
-
-
Daily Sats
-
-
-
-
- {% if avg_acceptance_rate is defined %}
- {{ "%.2f"|format(avg_acceptance_rate) }}%
- {% else %}
- N/A
- {% endif %}
-
-
Acceptance Rate
-
- All Workers
- Online
- Offline
- ASIC
- FPGA
-
- 60s to next update
-
-
- Uptime: 0h 0m 0s
-
-