diff --git a/App.py b/App.py
new file mode 100644
index 0000000..4c2b0d3
--- /dev/null
+++ b/App.py
@@ -0,0 +1,686 @@
+"""
+Main application module for the Bitcoin Mining Dashboard.
+"""
+import os
+import logging
+import time
+import gc
+import psutil
+import signal
+import sys
+import threading
+import json
+from flask import Flask, render_template, jsonify, Response, request
+from datetime import datetime
+from zoneinfo import ZoneInfo
+from flask_caching import Cache
+from apscheduler.schedulers.background import BackgroundScheduler
+
+# Import custom modules
+from config import load_config
+from data_service import MiningDashboardService
+from worker_service import WorkerService
+from state_manager import StateManager, arrow_history, metrics_log
+
+# Initialize Flask app
+app = Flask(__name__)
+
+# Set up caching using a simple in-memory cache
+cache = Cache(app, config={'CACHE_TYPE': 'SimpleCache', 'CACHE_DEFAULT_TIMEOUT': 10})
+
+# Global variables for SSE connections and metrics
+MAX_SSE_CONNECTIONS = 10 # Maximum concurrent SSE connections
+MAX_SSE_CONNECTION_TIME = 900 # 15 minutes maximum SSE connection time
+active_sse_connections = 0
+sse_connections_lock = threading.Lock()
+
+# Global variables for metrics and scheduling
+cached_metrics = None
+last_metrics_update_time = None
+scheduler_last_successful_run = None
+scheduler_recreate_lock = threading.Lock()
+
+# Track scheduler health
+scheduler = None
+
+# Global start time
+SERVER_START_TIME = datetime.now(ZoneInfo("America/Los_Angeles"))
+
+# Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+# Initialize state manager with Redis URL from environment
+redis_url = os.environ.get("REDIS_URL")
+state_manager = StateManager(redis_url)
+
+# --- Disable Client Caching for All Responses ---
+@app.after_request
+def add_header(response):
+ """Disable browser caching for all responses."""
+ response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
+ response.headers["Pragma"] = "no-cache"
+ response.headers["Expires"] = "0"
+ return response
+
+# --- Memory usage monitoring ---
+def log_memory_usage():
+ """Log current memory usage."""
+ try:
+ process = psutil.Process(os.getpid())
+ mem_info = process.memory_info()
+ logging.info(f"Memory usage: {mem_info.rss / 1024 / 1024:.2f} MB (RSS)")
+
+ # Log the size of key data structures
+ logging.info(f"Arrow history entries: {sum(len(v) for v in arrow_history.values() if isinstance(v, list))}")
+ logging.info(f"Metrics log entries: {len(metrics_log)}")
+ logging.info(f"Active SSE connections: {active_sse_connections}")
+ except Exception as e:
+ logging.error(f"Error logging memory usage: {e}")
+
+# --- Modified update_metrics_job function ---
+def update_metrics_job(force=False):
+ """
+ Background job to update metrics.
+
+ Args:
+ force (bool): Whether to force update regardless of timing
+ """
+ global cached_metrics, last_metrics_update_time, scheduler, scheduler_last_successful_run
+
+ try:
+ # Check scheduler health - enhanced logic to detect failed executors
+ if not scheduler or not hasattr(scheduler, 'running'):
+ logging.error("Scheduler object is invalid, attempting to recreate")
+ with scheduler_recreate_lock:
+ create_scheduler()
+ return
+
+ if not scheduler.running:
+ logging.warning("Scheduler stopped unexpectedly, attempting to restart")
+ try:
+ scheduler.start()
+ logging.info("Scheduler restarted successfully")
+ except Exception as e:
+ logging.error(f"Failed to restart scheduler: {e}")
+ # More aggressive recovery - recreate scheduler entirely
+ with scheduler_recreate_lock:
+ create_scheduler()
+ return
+
+ # Test the scheduler's executor by checking its state
+ try:
+ # Check if any jobs exist and are scheduled
+ jobs = scheduler.get_jobs()
+ if not jobs:
+ logging.error("No jobs found in scheduler - recreating")
+ with scheduler_recreate_lock:
+ create_scheduler()
+ return
+
+ # Check if the next run time is set for any job
+ next_runs = [job.next_run_time for job in jobs]
+ if not any(next_runs):
+ logging.error("No jobs with next_run_time found - recreating scheduler")
+ with scheduler_recreate_lock:
+ create_scheduler()
+ return
+ except RuntimeError as e:
+ # Properly handle the "cannot schedule new futures after shutdown" error
+ if "cannot schedule new futures after shutdown" in str(e):
+ logging.error("Detected dead executor, recreating scheduler")
+ with scheduler_recreate_lock:
+ create_scheduler()
+ return
+ except Exception as e:
+ logging.error(f"Error checking scheduler state: {e}")
+
+ # Skip update if the last one was too recent (prevents overlapping runs)
+ # Unless force=True is specified
+ current_time = time.time()
+ if not force and last_metrics_update_time and (current_time - last_metrics_update_time < 30):
+ logging.info("Skipping metrics update - previous update too recent")
+ return
+
+ # Set last update time to now
+ last_metrics_update_time = current_time
+
+ # Add timeout handling with a timer
+ job_timeout = 45 # seconds
+ job_successful = False
+
+ def timeout_handler():
+ if not job_successful:
+ logging.error("Background job timed out after 45 seconds")
+
+ # Set timeout timer
+ timer = threading.Timer(job_timeout, timeout_handler)
+ timer.daemon = True
+ timer.start()
+
+ try:
+ # Use the dashboard service to fetch metrics
+ metrics = dashboard_service.fetch_metrics()
+ if metrics:
+ # Update cached metrics
+ cached_metrics = metrics
+
+ # Update state history
+ state_manager.update_metrics_history(metrics)
+
+ logging.info("Background job: Metrics updated successfully")
+ job_successful = True
+
+ # Mark successful run time for watchdog
+ scheduler_last_successful_run = time.time()
+
+ # Persist critical state
+ state_manager.persist_critical_state(cached_metrics, scheduler_last_successful_run, last_metrics_update_time)
+
+ # Periodically check and prune data to prevent memory growth
+ if current_time % 300 < 60: # Every ~5 minutes
+ state_manager.prune_old_data()
+
+ # Only save state to Redis on a similar schedule, not every update
+ if current_time % 300 < 60: # Every ~5 minutes
+ state_manager.save_graph_state()
+
+ # Periodic full memory cleanup (every 2 hours)
+ if current_time % 7200 < 60: # Every ~2 hours
+ logging.info("Performing full memory cleanup")
+ gc.collect(generation=2) # Force full collection
+ else:
+ logging.error("Background job: Metrics update returned None")
+ except Exception as e:
+ logging.error(f"Background job: Unexpected error: {e}")
+ import traceback
+ logging.error(traceback.format_exc())
+ log_memory_usage()
+ finally:
+ # Cancel timer in finally block to ensure it's always canceled
+ timer.cancel()
+ except Exception as e:
+ logging.error(f"Background job: Unhandled exception: {e}")
+ import traceback
+ logging.error(traceback.format_exc())
+
+# --- SchedulerWatchdog to monitor and recover ---
+def scheduler_watchdog():
+ """Periodically check if the scheduler is running and healthy."""
+ global scheduler, scheduler_last_successful_run
+
+ try:
+ # If no successful run in past 2 minutes, consider the scheduler dead
+ if (scheduler_last_successful_run is None or
+ time.time() - scheduler_last_successful_run > 120):
+ logging.warning("Scheduler watchdog: No successful runs detected in last 2 minutes")
+
+ # Check if actual scheduler exists and is reported as running
+ if not scheduler or not getattr(scheduler, 'running', False):
+ logging.error("Scheduler watchdog: Scheduler appears to be dead, recreating")
+
+ # Use the lock to avoid multiple threads recreating simultaneously
+ with scheduler_recreate_lock:
+ create_scheduler()
+ except Exception as e:
+ logging.error(f"Error in scheduler watchdog: {e}")
+
+# --- Create Scheduler ---
+def create_scheduler():
+ """Create and configure a new scheduler instance with proper error handling."""
+ try:
+ # Stop existing scheduler if it exists
+ global scheduler
+        if scheduler:
+ try:
+ # Check if scheduler is running before attempting to shut it down
+ if hasattr(scheduler, 'running') and scheduler.running:
+ logging.info("Shutting down existing scheduler before creating a new one")
+ scheduler.shutdown(wait=False)
+ except Exception as e:
+ logging.error(f"Error shutting down existing scheduler: {e}")
+
+ # Create a new scheduler with more robust configuration
+ new_scheduler = BackgroundScheduler(
+ job_defaults={
+ 'coalesce': True, # Combine multiple missed runs into a single one
+ 'max_instances': 1, # Prevent job overlaps
+ 'misfire_grace_time': 30 # Allow misfires up to 30 seconds
+ }
+ )
+
+ # Add the update job
+ new_scheduler.add_job(
+ func=update_metrics_job,
+ trigger="interval",
+ seconds=60,
+ id='update_metrics_job',
+ replace_existing=True
+ )
+
+ # Add watchdog job - runs every 30 seconds to check scheduler health
+ new_scheduler.add_job(
+ func=scheduler_watchdog,
+ trigger="interval",
+ seconds=30,
+ id='scheduler_watchdog',
+ replace_existing=True
+ )
+
+ # Start the scheduler
+ new_scheduler.start()
+ logging.info("Scheduler created and started successfully")
+ scheduler = new_scheduler
+ return new_scheduler
+ except Exception as e:
+ logging.error(f"Error creating scheduler: {e}")
+ return None
+
+# --- Custom Template Filter ---
+@app.template_filter('commafy')
+def commafy(value):
+ """Add commas to numbers for better readability."""
+ try:
+ return "{:,}".format(int(value))
+ except Exception:
+ return value
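+
+# Template usage (illustrative): {{ metrics.block_number | commafy }}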
+
+# --- Fixed SSE Endpoint with proper request context handling ---
+@app.route('/stream')
+def stream():
+ """SSE endpoint for real-time updates."""
+ # Important: Capture any request context information BEFORE the generator
+ # This ensures we're not trying to access request outside its context
+
+ def event_stream():
+ global active_sse_connections, cached_metrics
+ client_id = None
+
+ try:
+ # Check if we're at the connection limit
+ with sse_connections_lock:
+ if active_sse_connections >= MAX_SSE_CONNECTIONS:
+ logging.warning(f"Connection limit reached ({MAX_SSE_CONNECTIONS}), refusing new SSE connection")
+ yield f"data: {{\"error\": \"Too many connections, please try again later\", \"retry\": 5000}}\n\n"
+ return
+
+ active_sse_connections += 1
+ client_id = f"client-{int(time.time() * 1000) % 10000}"
+ logging.info(f"SSE {client_id}: Connection established (total: {active_sse_connections})")
+
+ # Set a maximum connection time - increased to 15 minutes for better user experience
+ end_time = time.time() + MAX_SSE_CONNECTION_TIME
+ last_timestamp = None
+
+ # Send initial data immediately to prevent delay in dashboard updates
+ if cached_metrics:
+ yield f"data: {json.dumps(cached_metrics)}\n\n"
+ last_timestamp = cached_metrics.get("server_timestamp")
+ else:
+ # Send ping if no data available yet
+ yield f"data: {{\"type\": \"ping\", \"client_id\": \"{client_id}\"}}\n\n"
+
+ # Main event loop with improved error handling
+ while time.time() < end_time:
+ try:
+ # Send data only if it's changed
+ if cached_metrics and cached_metrics.get("server_timestamp") != last_timestamp:
+ data = json.dumps(cached_metrics)
+ last_timestamp = cached_metrics.get("server_timestamp")
+ yield f"data: {data}\n\n"
+
+ # Send regular pings about every 30 seconds to keep connection alive
+ if int(time.time()) % 30 == 0:
+ yield f"data: {{\"type\": \"ping\", \"time\": {int(time.time())}, \"connections\": {active_sse_connections}}}\n\n"
+
+ # Sleep to reduce CPU usage
+ time.sleep(1)
+
+ # Warn client 60 seconds before timeout so client can prepare to reconnect
+ remaining_time = end_time - time.time()
+ if remaining_time < 60 and int(remaining_time) % 15 == 0: # Every 15 sec in last minute
+ yield f"data: {{\"type\": \"timeout_warning\", \"remaining\": {int(remaining_time)}}}\n\n"
+
+ except Exception as e:
+ logging.error(f"SSE {client_id}: Error in stream: {e}")
+ time.sleep(2) # Prevent tight error loops
+
+ # Connection timeout reached - send a reconnect instruction to client
+ logging.info(f"SSE {client_id}: Connection timeout reached ({MAX_SSE_CONNECTION_TIME}s)")
+ yield f"data: {{\"type\": \"timeout\", \"message\": \"Connection timeout reached\", \"reconnect\": true}}\n\n"
+
+ except GeneratorExit:
+ # This is how we detect client disconnection
+ logging.info(f"SSE {client_id}: Client disconnected (GeneratorExit)")
+ # Don't yield here - just let the generator exit normally
+
+ finally:
+ # Always decrement the connection counter when done
+ with sse_connections_lock:
+ active_sse_connections = max(0, active_sse_connections - 1)
+ logging.info(f"SSE {client_id}: Connection closed (remaining: {active_sse_connections})")
+
+ # Configure response with improved error handling
+ try:
+ response = Response(event_stream(), mimetype="text/event-stream")
+ response.headers['Cache-Control'] = 'no-cache'
+ response.headers['X-Accel-Buffering'] = 'no' # Disable nginx buffering
+ response.headers['Access-Control-Allow-Origin'] = '*' # Allow CORS
+ return response
+ except Exception as e:
+ logging.error(f"Error creating SSE response: {e}")
+ return jsonify({"error": "Internal server error"}), 500
+
+# Duplicate stream endpoint for the dashboard path
+@app.route('/dashboard/stream')
+def dashboard_stream():
+ """Duplicate of the stream endpoint for the dashboard route."""
+ return stream()
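+
+# For reference, a minimal Python client for this SSE endpoint might look like
+# the sketch below (assumes the server is reachable on localhost:5000; the
+# `requests` dependency is already in requirements.txt):
+#
+#   import json, requests
+#
+#   def consume_stream(url="http://localhost:5000/stream"):
+#       with requests.get(url, stream=True, timeout=(10, None)) as resp:
+#           for line in resp.iter_lines(decode_unicode=True):
+#               if line and line.startswith("data: "):
+#                   payload = json.loads(line[len("data: "):])
+#                   if payload.get("type") == "timeout":
+#                       break  # server signals clients to reconnect
+#                   print(payload)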
+
+# --- Routes ---
+@app.route("/")
+def boot():
+ """Serve the boot sequence page."""
+ return render_template("boot.html", base_url=request.host_url.rstrip('/'))
+
+# --- Updated Dashboard Route ---
+@app.route("/dashboard")
+def dashboard():
+ """Serve the main dashboard page."""
+ global cached_metrics, last_metrics_update_time
+
+ # Make sure we have metrics data before rendering the template
+ if cached_metrics is None:
+ # Force an immediate metrics fetch regardless of the time since last update
+ logging.info("Dashboard accessed with no cached metrics - forcing immediate fetch")
+ try:
+ # Force update with the force parameter
+ update_metrics_job(force=True)
+ except Exception as e:
+ logging.error(f"Error during forced metrics fetch: {e}")
+
+ # If still None after our attempt, create default metrics
+ if cached_metrics is None:
+ default_metrics = {
+ "server_timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat(),
+ "server_start_time": SERVER_START_TIME.astimezone(ZoneInfo("America/Los_Angeles")).isoformat(),
+ "hashrate_24hr": None,
+ "hashrate_24hr_unit": "TH/s",
+ "hashrate_3hr": None,
+ "hashrate_3hr_unit": "TH/s",
+ "hashrate_10min": None,
+ "hashrate_10min_unit": "TH/s",
+ "hashrate_60sec": None,
+ "hashrate_60sec_unit": "TH/s",
+ "pool_total_hashrate": None,
+ "pool_total_hashrate_unit": "TH/s",
+ "workers_hashing": 0,
+ "total_last_share": None,
+ "block_number": None,
+ "btc_price": 0,
+ "network_hashrate": 0,
+ "difficulty": 0,
+ "daily_revenue": 0,
+ "daily_power_cost": 0,
+ "daily_profit_usd": 0,
+ "monthly_profit_usd": 0,
+ "daily_mined_sats": 0,
+ "monthly_mined_sats": 0,
+ "unpaid_earnings": "0",
+ "est_time_to_payout": None,
+ "last_block_height": None,
+ "last_block_time": None,
+ "last_block_earnings": None,
+ "blocks_found": "0",
+ "estimated_earnings_per_day_sats": 0,
+ "estimated_earnings_next_block_sats": 0,
+ "estimated_rewards_in_window_sats": 0,
+ "arrow_history": {}
+ }
+ logging.warning("Rendering dashboard with default metrics - no data available yet")
+ current_time = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%Y-%m-%d %I:%M:%S %p")
+ return render_template("dashboard.html", metrics=default_metrics, current_time=current_time)
+
+ # If we have metrics, use them
+ current_time = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%Y-%m-%d %I:%M:%S %p")
+ return render_template("dashboard.html", metrics=cached_metrics, current_time=current_time)
+
+@app.route("/api/metrics")
+def api_metrics():
+ """API endpoint for metrics data."""
+ if cached_metrics is None:
+ update_metrics_job()
+ return jsonify(cached_metrics)
+
+# --- Workers Dashboard Route and API ---
+@app.route("/workers")
+def workers_dashboard():
+ """Serve the workers overview dashboard page."""
+ current_time = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%Y-%m-%d %I:%M:%S %p")
+
+ # Only get minimal worker stats for initial page load
+ # Client-side JS will fetch the full data via API
+ workers_data = worker_service.get_workers_data(cached_metrics)
+
+ return render_template("workers.html",
+ current_time=current_time,
+ workers_total=workers_data.get('workers_total', 0),
+ workers_online=workers_data.get('workers_online', 0),
+ workers_offline=workers_data.get('workers_offline', 0),
+ total_hashrate=workers_data.get('total_hashrate', 0),
+ hashrate_unit=workers_data.get('hashrate_unit', 'TH/s'),
+ total_earnings=workers_data.get('total_earnings', 0),
+ daily_sats=workers_data.get('daily_sats', 0),
+ avg_acceptance_rate=workers_data.get('avg_acceptance_rate', 0))
+
+@app.route("/api/workers")
+def api_workers():
+ """API endpoint for worker data."""
+ # Get the force_refresh parameter from the query string (default: False)
+ force_refresh = request.args.get('force', 'false').lower() == 'true'
+ return jsonify(worker_service.get_workers_data(cached_metrics, force_refresh=force_refresh))
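+
+# Example request (illustrative): GET /api/workers?force=true passes
+# force_refresh=True to WorkerService, presumably bypassing any cached
+# worker data in favor of a fresh fetch.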
+
+# --- New Time Endpoint for Fine Syncing ---
+@app.route("/api/time")
+def api_time():
+ """API endpoint for server time."""
+ return jsonify({
+ "server_timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat(),
+ "server_start_time": SERVER_START_TIME.astimezone(ZoneInfo("America/Los_Angeles")).isoformat()
+ })
+
+# Health check endpoint with detailed diagnostics
+@app.route("/api/health")
+def health_check():
+ """Health check endpoint with enhanced system diagnostics."""
+ # Calculate uptime
+ uptime_seconds = (datetime.now(ZoneInfo("America/Los_Angeles")) - SERVER_START_TIME).total_seconds()
+
+ # Get process memory usage
+ try:
+ process = psutil.Process(os.getpid())
+ mem_info = process.memory_info()
+ memory_usage_mb = mem_info.rss / 1024 / 1024
+ memory_percent = process.memory_percent()
+ except Exception as e:
+ logging.error(f"Error getting memory usage: {e}")
+ memory_usage_mb = 0
+ memory_percent = 0
+
+ # Check data freshness
+ data_age = 0
+ if cached_metrics and cached_metrics.get("server_timestamp"):
+ try:
+ last_update = datetime.fromisoformat(cached_metrics["server_timestamp"])
+ data_age = (datetime.now(ZoneInfo("America/Los_Angeles")) - last_update).total_seconds()
+ except Exception as e:
+ logging.error(f"Error calculating data age: {e}")
+
+ # Determine health status
+ health_status = "healthy"
+ if data_age > 300: # Data older than 5 minutes
+ health_status = "degraded"
+ if not cached_metrics:
+ health_status = "unhealthy"
+
+ # Build response with detailed diagnostics
+ status = {
+ "status": health_status,
+ "uptime": uptime_seconds,
+ "uptime_formatted": f"{int(uptime_seconds // 3600)}h {int((uptime_seconds % 3600) // 60)}m {int(uptime_seconds % 60)}s",
+ "connections": active_sse_connections,
+ "memory": {
+ "usage_mb": round(memory_usage_mb, 2),
+ "percent": round(memory_percent, 2)
+ },
+ "data": {
+ "last_update": cached_metrics.get("server_timestamp") if cached_metrics else None,
+ "age_seconds": int(data_age),
+ "available": cached_metrics is not None
+ },
+ "scheduler": {
+ "running": scheduler.running if hasattr(scheduler, "running") else False,
+ "last_successful_run": scheduler_last_successful_run
+ },
+ "redis": {
+ "connected": state_manager.redis_client is not None
+ },
+ "timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
+ }
+
+ # Log health check if status is not healthy
+ if health_status != "healthy":
+ logging.warning(f"Health check returning {health_status} status: {status}")
+
+ return jsonify(status)
+
+# Add enhanced scheduler health check endpoint
+@app.route("/api/scheduler-health")
+def scheduler_health():
+ """API endpoint for scheduler health information."""
+ try:
+ scheduler_status = {
+ "running": scheduler.running if hasattr(scheduler, "running") else False,
+ "job_count": len(scheduler.get_jobs()) if hasattr(scheduler, "get_jobs") else 0,
+ "next_run": str(scheduler.get_jobs()[0].next_run_time) if hasattr(scheduler, "get_jobs") and scheduler.get_jobs() else None,
+ "last_update": last_metrics_update_time,
+ "time_since_update": time.time() - last_metrics_update_time if last_metrics_update_time else None,
+ "last_successful_run": scheduler_last_successful_run,
+ "time_since_successful": time.time() - scheduler_last_successful_run if scheduler_last_successful_run else None
+ }
+ return jsonify(scheduler_status)
+ except Exception as e:
+ return jsonify({"error": str(e)}), 500
+
+# Add a health check route that can attempt to fix the scheduler if needed
+@app.route("/api/fix-scheduler", methods=["POST"])
+def fix_scheduler():
+ """API endpoint to recreate the scheduler."""
+ try:
+ with scheduler_recreate_lock:
+ new_scheduler = create_scheduler()
+ if new_scheduler:
+ global scheduler
+ scheduler = new_scheduler
+ return jsonify({"status": "success", "message": "Scheduler recreated successfully"})
+ else:
+ return jsonify({"status": "error", "message": "Failed to recreate scheduler"}), 500
+ except Exception as e:
+ return jsonify({"status": "error", "message": str(e)}), 500
+
+@app.route("/api/force-refresh", methods=["POST"])
+def force_refresh():
+ """Emergency endpoint to force metrics refresh."""
+ logging.warning("Emergency force-refresh requested")
+ try:
+ # Force fetch new metrics
+ metrics = dashboard_service.fetch_metrics()
+ if metrics:
+ global cached_metrics, scheduler_last_successful_run
+ cached_metrics = metrics
+ scheduler_last_successful_run = time.time()
+ logging.info(f"Force refresh successful, new timestamp: {metrics['server_timestamp']}")
+ return jsonify({"status": "success", "message": "Metrics refreshed", "timestamp": metrics['server_timestamp']})
+ else:
+ return jsonify({"status": "error", "message": "Failed to fetch metrics"}), 500
+ except Exception as e:
+ logging.error(f"Force refresh error: {e}")
+ return jsonify({"status": "error", "message": str(e)}), 500
+
+@app.errorhandler(404)
+def page_not_found(e):
+ """Error handler for 404 errors."""
+ return render_template("error.html", message="Page not found."), 404
+
+@app.errorhandler(500)
+def internal_server_error(e):
+ """Error handler for 500 errors."""
+ logging.error("Internal server error: %s", e)
+ return render_template("error.html", message="Internal server error."), 500
+
+class RobustMiddleware:
+ """WSGI middleware for enhanced error handling."""
+ def __init__(self, app):
+ self.app = app
+
+ def __call__(self, environ, start_response):
+ try:
+ return self.app(environ, start_response)
+ except Exception as e:
+ logging.exception("Unhandled exception in WSGI app")
+ start_response("500 Internal Server Error", [("Content-Type", "text/html")])
+ return [b"
Internal Server Error "]
+
+# Add the middleware
+app.wsgi_app = RobustMiddleware(app.wsgi_app)
+
+# Initialize the dashboard service and worker service
+config = load_config()
+dashboard_service = MiningDashboardService(
+ config.get("power_cost", 0.0),
+ config.get("power_usage", 0.0),
+ config.get("wallet")
+)
+worker_service = WorkerService()
+
+# Restore critical state if available
+last_run, last_update = state_manager.load_critical_state()
+if last_run:
+ scheduler_last_successful_run = last_run
+if last_update:
+ last_metrics_update_time = last_update
+
+# Initialize the scheduler
+scheduler = create_scheduler()
+
+# Graceful shutdown handler for clean termination
+def graceful_shutdown(signum, frame):
+ """Handle shutdown signals gracefully."""
+ logging.info(f"Received shutdown signal {signum}, shutting down gracefully")
+
+ # Save state before shutting down
+ state_manager.save_graph_state()
+
+ # Stop the scheduler
+ if scheduler:
+ try:
+ scheduler.shutdown(wait=True) # wait for running jobs to complete
+ logging.info("Scheduler shutdown complete")
+ except Exception as e:
+ logging.error(f"Error shutting down scheduler: {e}")
+
+ # Log connection info before exit
+ logging.info(f"Active SSE connections at shutdown: {active_sse_connections}")
+
+ # Exit with success code
+ sys.exit(0)
+
+# Register signal handlers
+signal.signal(signal.SIGTERM, graceful_shutdown)
+signal.signal(signal.SIGINT, graceful_shutdown)
+
+# Run once at startup to initialize data
+update_metrics_job(force=True)
+
+if __name__ == "__main__":
+    # When deploying with Gunicorn in Docker, run with a single worker (e.g. --workers=1 --threads=12, as in the provided Dockerfile) so global state is shared across request handlers.
+ app.run(host="0.0.0.0", port=5000, debug=False, use_reloader=False)
diff --git a/README.md b/README.md
index 846e54e..7ee8d5b 100644
--- a/README.md
+++ b/README.md
@@ -1,150 +1,150 @@
-# Ocean.xyz Bitcoin Mining Dashboard
-
-## A Practical Monitoring Solution for Bitcoin Miners
-
-This open-source dashboard provides comprehensive monitoring for Ocean.xyz pool miners, offering real-time data on hashrate, profitability, and worker status. Designed to be resource-efficient and user-friendly, it helps miners maintain oversight of their operations.
-
----
-## Gallery:
-
-
-
----
-
-## Practical Mining Intelligence
-
-The dashboard aggregates essential metrics in one accessible interface:
-
-- **Profitability Analysis**: Monitor daily and monthly earnings in BTC and USD
-- **Worker Status**: Track online/offline status of mining equipment
-- **Payout Monitoring**: View unpaid balance and estimated time to next payout
-- **Network Metrics**: Stay informed of difficulty adjustments and network hashrate
-- **Cost Analysis**: Calculate profit margins based on power consumption
-
-## Key Features
-
-### Mining Performance Metrics
-- **Hashrate Visualization**: Clear graphical representation of hashrate trends
-- **Financial Calculations**: Automatic conversion between BTC and USD values
-- **Payout Estimation**: Projected time until minimum payout threshold is reached
-- **Network Intelligence**: Current Bitcoin price, difficulty, and total network hashrate
-
-### Worker Management
-- **Equipment Overview**: Consolidated view of all mining devices
-- **Status Monitoring**: Clear indicators for active and inactive devices
-- **Performance Data**: Individual hashrate, temperature, and acceptance rate metrics
-- **Filtering Options**: Sort and search by device type or operational status
-
-### Thoughtful Design Elements
-- **Retro Terminal Monitor**: A floating system monitor with classic design aesthetics
-- **Boot Sequence**: An engaging initialization sequence on startup
-- **Responsive Interface**: Adapts seamlessly to desktop and mobile devices
-
-## Quick Start
-
-### Installation
-
-1. Clone the repository
-2. Install dependencies:
- ```
- pip install -r requirements.txt
- ```
-3. Run the setup script:
- ```
- python setup.py
- ```
-4. Configure your wallet:
- ```json
- {
- "power_cost": 0.12,
- "power_usage": 3450,
- "wallet": "your-wallet-address"
- }
- ```
-5. Start the application:
- ```
- python App.py
- ```
-6. Open your browser at `http://localhost:5000`
-
-### Docker Deployment
-
-```bash
-docker run -d -p 5000:5000 \
- -e WALLET=your-wallet-address \
- -e POWER_COST=0.12 \
- -e POWER_USAGE=3450 \
- yourusername/bitcoin-mining-dashboard
-```
-
-For full deployment instructions, see [DEPLOYMENT.md](DEPLOYMENT.md).
-
-## Dashboard Components
-
-### Main Dashboard
-
-- Interactive hashrate visualization
-- Detailed profitability metrics
-- Network statistics
-- Current Bitcoin price
-- Balance and payment information
-
-### Workers Dashboard
-
-- Fleet summary with aggregate statistics
-- Individual worker performance metrics
-- Status indicators for each device
-- Flexible filtering and search functionality
-
-### Retro Terminal Monitor
-
-- Floating interface providing system statistics
-- Progress indicator for data refresh cycles
-- System uptime display
-- Minimizable design for unobtrusive monitoring
-- Thoughtful visual styling reminiscent of classic computer terminals
-
-## System Requirements
-
-The application is designed for efficient resource utilization:
-- Compatible with standard desktop and laptop computers
-- Modest CPU and memory requirements
-- Suitable for continuous operation
-- Cross-platform support for Windows, macOS, and Linux
-
-## Project Structure
-
-For details on the project's architecture and organization, see [PROJECT_STRUCTURE.md](PROJECT_STRUCTURE.md).
-
-## Troubleshooting
-
-For optimal performance:
-
-1. Use the refresh function if data appears outdated
-2. Verify network connectivity for consistent updates
-3. Restart the application after configuration changes
-4. Access the health endpoint at `/api/health` for system status information
-
-## Getting Started
-
-1. Download the latest release
-2. Configure with your mining information
-3. Launch the application to begin monitoring
-
-The dashboard requires only your Ocean.xyz mining wallet address for basic functionality.
-
----
-
-## Technical Foundation
-
-Built on Flask with Chart.js for visualization and Server-Sent Events for real-time updates, this dashboard retrieves data from Ocean.xyz and performs calculations based on current network metrics and your specified parameters.
-
-The application prioritizes stability and efficiency for reliable long-term operation. Source code is available for review and customization.
-
-## Acknowledgments
-
-- Ocean.xyz mining pool for their service
-- The open-source community for their contributions
-- Bitcoin protocol developers
-
-Available under the MIT License. This is an independent project not affiliated with Ocean.xyz.
+# Ocean.xyz Bitcoin Mining Dashboard
+
+## A Practical Monitoring Solution for Bitcoin Miners
+
+This open-source dashboard provides comprehensive monitoring for Ocean.xyz pool miners, offering real-time data on hashrate, profitability, and worker status. Designed to be resource-efficient and user-friendly, it helps miners maintain oversight of their operations.
+
+---
+## Gallery:
+
+
+
+---
+
+## Practical Mining Intelligence
+
+The dashboard aggregates essential metrics in one accessible interface:
+
+- **Profitability Analysis**: Monitor daily and monthly earnings in BTC and USD
+- **Worker Status**: Track online/offline status of mining equipment
+- **Payout Monitoring**: View unpaid balance and estimated time to next payout
+- **Network Metrics**: Stay informed of difficulty adjustments and network hashrate
+- **Cost Analysis**: Calculate profit margins based on power consumption
+
+## Key Features
+
+### Mining Performance Metrics
+- **Hashrate Visualization**: Clear graphical representation of hashrate trends
+- **Financial Calculations**: Automatic conversion between BTC and USD values
+- **Payout Estimation**: Projected time until minimum payout threshold is reached
+- **Network Intelligence**: Current Bitcoin price, difficulty, and total network hashrate
+
+### Worker Management
+- **Equipment Overview**: Consolidated view of all mining devices
+- **Status Monitoring**: Clear indicators for active and inactive devices
+- **Performance Data**: Individual hashrate, temperature, and acceptance rate metrics
+- **Filtering Options**: Sort and search by device type or operational status
+
+### Thoughtful Design Elements
+- **Retro Terminal Monitor**: A floating system monitor with classic design aesthetics
+- **Boot Sequence**: An engaging initialization sequence on startup
+- **Responsive Interface**: Adapts seamlessly to desktop and mobile devices
+
+## Quick Start
+
+### Installation
+
+1. Clone the repository
+2. Install dependencies:
+ ```
+ pip install -r requirements.txt
+ ```
+3. Run the setup script:
+ ```
+ python setup.py
+ ```
+4. Configure your wallet:
+ ```json
+ {
+ "power_cost": 0.12,
+ "power_usage": 3450,
+ "wallet": "your-wallet-address"
+ }
+ ```
+5. Start the application:
+ ```
+ python App.py
+ ```
+6. Open your browser at `http://localhost:5000`
+
+### Docker Deployment
+
+```bash
+docker run -d -p 5000:5000 \
+ -e WALLET=your-wallet-address \
+ -e POWER_COST=0.12 \
+ -e POWER_USAGE=3450 \
+ yourusername/bitcoin-mining-dashboard
+```
+
+For full deployment instructions, see [DEPLOYMENT.md](DEPLOYMENT.md).
+
+## Dashboard Components
+
+### Main Dashboard
+
+- Interactive hashrate visualization
+- Detailed profitability metrics
+- Network statistics
+- Current Bitcoin price
+- Balance and payment information
+
+### Workers Dashboard
+
+- Fleet summary with aggregate statistics
+- Individual worker performance metrics
+- Status indicators for each device
+- Flexible filtering and search functionality
+
+### Retro Terminal Monitor
+
+- Floating interface providing system statistics
+- Progress indicator for data refresh cycles
+- System uptime display
+- Minimizable design for unobtrusive monitoring
+- Thoughtful visual styling reminiscent of classic computer terminals
+
+## System Requirements
+
+The application is designed for efficient resource utilization:
+- Compatible with standard desktop and laptop computers
+- Modest CPU and memory requirements
+- Suitable for continuous operation
+- Cross-platform support for Windows, macOS, and Linux
+
+## Project Structure
+
+For details on the project's architecture and organization, see [PROJECT_STRUCTURE.md](PROJECT_STRUCTURE.md).
+
+## Troubleshooting
+
+For optimal performance:
+
+1. Use the refresh function if data appears outdated
+2. Verify network connectivity for consistent updates
+3. Restart the application after configuration changes
+4. Access the health endpoint at `/api/health` for system status information (see the snippet below)
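+
+For a quick automated probe, the health endpoint can also be polled from Python (a minimal sketch, assuming the dashboard is running locally on the default port):
+
+```python
+import requests
+
+status = requests.get("http://localhost:5000/api/health", timeout=5).json()
+print(status["status"], status["data"]["age_seconds"])
+```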
+
+## Getting Started
+
+1. Download the latest release
+2. Configure with your mining information
+3. Launch the application to begin monitoring
+
+The dashboard requires only your Ocean.xyz mining wallet address for basic functionality.
+
+---
+
+## Technical Foundation
+
+Built on Flask with Chart.js for visualization and Server-Sent Events for real-time updates, this dashboard retrieves data from Ocean.xyz and performs calculations based on current network metrics and your specified parameters.
+
+The application prioritizes stability and efficiency for reliable long-term operation. Source code is available for review and customization.
+
+## Acknowledgments
+
+- Ocean.xyz mining pool for their service
+- The open-source community for their contributions
+- Bitcoin protocol developers
+
+Available under the MIT License. This is an independent project not affiliated with Ocean.xyz.
diff --git a/config.json b/config.json
new file mode 100644
index 0000000..9514130
--- /dev/null
+++ b/config.json
@@ -0,0 +1,5 @@
+{
+ "power_cost": 0.0,
+ "power_usage": 0.0,
+ "wallet": "bc1py5zmrtssheq3shd8cptpl5l5m3txxr5afynyg2gyvam6w78s4dlqqnt4v9"
+}
\ No newline at end of file
diff --git a/config.py b/config.py
new file mode 100644
index 0000000..64c8fa8
--- /dev/null
+++ b/config.py
@@ -0,0 +1,69 @@
+"""
+Configuration management module for the Bitcoin Mining Dashboard.
+Responsible for loading and managing application settings.
+"""
+import os
+import json
+import logging
+
+# Default configuration file path
+CONFIG_FILE = "config.json"
+
+def load_config():
+ """
+ Load configuration from file or return defaults if file doesn't exist.
+
+ Returns:
+ dict: Configuration dictionary with settings
+ """
+ default_config = {
+ "power_cost": 0.0,
+ "power_usage": 0.0,
+ "wallet": "bc1py5zmrtssheq3shd8cptpl5l5m3txxr5afynyg2gyvam6w78s4dlqqnt4v9"
+ }
+
+ if os.path.exists(CONFIG_FILE):
+ try:
+ with open(CONFIG_FILE, "r") as f:
+ config = json.load(f)
+ logging.info(f"Configuration loaded from {CONFIG_FILE}")
+ return config
+ except Exception as e:
+ logging.error(f"Error loading config: {e}")
+ else:
+ logging.warning(f"Config file {CONFIG_FILE} not found, using defaults")
+
+ return default_config
+
+def save_config(config):
+ """
+ Save configuration to file.
+
+ Args:
+ config (dict): Configuration dictionary to save
+
+ Returns:
+ bool: True if save was successful, False otherwise
+ """
+ try:
+ with open(CONFIG_FILE, "w") as f:
+ json.dump(config, f, indent=2)
+ logging.info(f"Configuration saved to {CONFIG_FILE}")
+ return True
+ except Exception as e:
+ logging.error(f"Error saving config: {e}")
+ return False
+
+def get_value(key, default=None):
+ """
+ Get a configuration value by key with fallback to default.
+
+ Args:
+ key (str): Configuration key to look up
+ default: Default value if key is not found
+
+ Returns:
+ Value for the key or default if not found
+ """
+ config = load_config()
+ return config.get(key, default)
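+
+# Example usage (illustrative values):
+#   cost = get_value("power_cost", 0.0)   # -> 0.12 if set in config.json
+#   wallet = get_value("wallet")          # -> the configured wallet address
+# Note: get_value() re-reads config.json on each call via load_config().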
diff --git a/data_service.py b/data_service.py
new file mode 100644
index 0000000..1f0a1bd
--- /dev/null
+++ b/data_service.py
@@ -0,0 +1,438 @@
+"""
+Data service module for fetching and processing mining data.
+"""
+import logging
+import re
+import time
+import json
+from datetime import datetime
+from zoneinfo import ZoneInfo
+from concurrent.futures import ThreadPoolExecutor
+import requests
+from bs4 import BeautifulSoup
+
+from models import OceanData, convert_to_ths
+
+class MiningDashboardService:
+ """Service for fetching and processing mining dashboard data."""
+
+ def __init__(self, power_cost, power_usage, wallet):
+ """
+ Initialize the mining dashboard service.
+
+ Args:
+ power_cost (float): Cost of power in $ per kWh
+ power_usage (float): Power usage in watts
+ wallet (str): Bitcoin wallet address for Ocean.xyz
+ """
+ self.power_cost = power_cost
+ self.power_usage = power_usage
+ self.wallet = wallet
+ self.cache = {}
+ self.sats_per_btc = 100_000_000
+ self.previous_values = {}
+ self.session = requests.Session()
+
+ def fetch_metrics(self):
+ """
+ Fetch metrics from Ocean.xyz and other sources.
+
+ Returns:
+ dict: Mining metrics data
+ """
+ # Add execution time tracking
+ start_time = time.time()
+
+ try:
+ with ThreadPoolExecutor(max_workers=2) as executor:
+ future_ocean = executor.submit(self.get_ocean_data)
+ future_btc = executor.submit(self.get_bitcoin_stats)
+ try:
+ ocean_data = future_ocean.result(timeout=15)
+ btc_stats = future_btc.result(timeout=15)
+ except Exception as e:
+ logging.error(f"Error fetching metrics concurrently: {e}")
+ return None
+
+ if ocean_data is None:
+ logging.error("Failed to retrieve Ocean data")
+ return None
+
+ difficulty, network_hashrate, btc_price, block_count = btc_stats
+
+ # If we failed to get network hashrate, use a reasonable default to prevent division by zero
+ if network_hashrate is None:
+ logging.warning("Using default network hashrate")
+ network_hashrate = 500e18 # ~500 EH/s as a reasonable fallback
+
+ # If we failed to get BTC price, use a reasonable default
+ if btc_price is None:
+ logging.warning("Using default BTC price")
+ btc_price = 75000 # $75,000 as a reasonable fallback
+
+ # Convert hashrates to a common unit (TH/s) for consistency
+ hr3 = ocean_data.hashrate_3hr or 0
+ hr3_unit = (ocean_data.hashrate_3hr_unit or 'th/s').lower()
+ local_hashrate = convert_to_ths(hr3, hr3_unit) * 1e12 # Convert to H/s for calculation
+
+ hash_proportion = local_hashrate / network_hashrate if network_hashrate else 0
+            block_reward = 3.125  # block subsidy after the April 2024 halving
+            blocks_per_day = 86400 / 600  # ~144 blocks/day at one block per ~10 minutes
+            daily_btc_gross = hash_proportion * block_reward * blocks_per_day
+            daily_btc_net = daily_btc_gross * (1 - 0.02 - 0.028)  # haircut constants appear to model pool/protocol fees
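+            # Worked example (hypothetical figures): at 600 TH/s of local
+            # hashrate against an 8.1e20 H/s network, hash_proportion is about
+            # 7.4e-7, so daily_btc_gross ~= 7.4e-7 * 3.125 * 144 ~= 0.000333 BTC.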
+
+ daily_revenue = round(daily_btc_net * btc_price, 2) if btc_price is not None else None
+ daily_power_cost = round((self.power_usage / 1000) * self.power_cost * 24, 2)
+ daily_profit_usd = round(daily_revenue - daily_power_cost, 2) if daily_revenue is not None else None
+ monthly_profit_usd = round(daily_profit_usd * 30, 2) if daily_profit_usd is not None else None
+
+ daily_mined_sats = int(round(daily_btc_net * self.sats_per_btc))
+ monthly_mined_sats = daily_mined_sats * 30
+
+ # Use default 0 for earnings if scraping returned None.
+ estimated_earnings_per_day = ocean_data.estimated_earnings_per_day if ocean_data.estimated_earnings_per_day is not None else 0
+ estimated_earnings_next_block = ocean_data.estimated_earnings_next_block if ocean_data.estimated_earnings_next_block is not None else 0
+ estimated_rewards_in_window = ocean_data.estimated_rewards_in_window if ocean_data.estimated_rewards_in_window is not None else 0
+
+ metrics = {
+ 'pool_total_hashrate': ocean_data.pool_total_hashrate,
+ 'pool_total_hashrate_unit': ocean_data.pool_total_hashrate_unit,
+ 'hashrate_24hr': ocean_data.hashrate_24hr,
+ 'hashrate_24hr_unit': ocean_data.hashrate_24hr_unit,
+ 'hashrate_3hr': ocean_data.hashrate_3hr,
+ 'hashrate_3hr_unit': ocean_data.hashrate_3hr_unit,
+ 'hashrate_10min': ocean_data.hashrate_10min,
+ 'hashrate_10min_unit': ocean_data.hashrate_10min_unit,
+ 'hashrate_5min': ocean_data.hashrate_5min,
+ 'hashrate_5min_unit': ocean_data.hashrate_5min_unit,
+ 'hashrate_60sec': ocean_data.hashrate_60sec,
+ 'hashrate_60sec_unit': ocean_data.hashrate_60sec_unit,
+ 'workers_hashing': ocean_data.workers_hashing,
+ 'btc_price': btc_price,
+ 'block_number': block_count,
+ 'network_hashrate': (network_hashrate / 1e18) if network_hashrate else None,
+ 'difficulty': difficulty,
+ 'daily_btc_net': daily_btc_net,
+ 'estimated_earnings_per_day': estimated_earnings_per_day,
+ 'daily_revenue': daily_revenue,
+ 'daily_power_cost': daily_power_cost,
+ 'daily_profit_usd': daily_profit_usd,
+ 'monthly_profit_usd': monthly_profit_usd,
+ 'daily_mined_sats': daily_mined_sats,
+ 'monthly_mined_sats': monthly_mined_sats,
+ 'estimated_earnings_next_block': estimated_earnings_next_block,
+ 'estimated_rewards_in_window': estimated_rewards_in_window,
+ 'unpaid_earnings': ocean_data.unpaid_earnings,
+ 'est_time_to_payout': ocean_data.est_time_to_payout,
+ 'last_block_height': ocean_data.last_block_height,
+ 'last_block_time': ocean_data.last_block_time,
+ 'total_last_share': ocean_data.total_last_share,
+ 'blocks_found': ocean_data.blocks_found or "0",
+ 'last_block_earnings': ocean_data.last_block_earnings
+ }
+ metrics['estimated_earnings_per_day_sats'] = int(round(estimated_earnings_per_day * self.sats_per_btc))
+ metrics['estimated_earnings_next_block_sats'] = int(round(estimated_earnings_next_block * self.sats_per_btc))
+ metrics['estimated_rewards_in_window_sats'] = int(round(estimated_rewards_in_window * self.sats_per_btc))
+
+ # --- Add server timestamps to the response in Los Angeles Time ---
+ metrics["server_timestamp"] = datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
+ metrics["server_start_time"] = datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
+
+ # Log execution time
+ execution_time = time.time() - start_time
+ metrics["execution_time"] = execution_time
+ if execution_time > 10:
+ logging.warning(f"Metrics fetch took {execution_time:.2f} seconds")
+ else:
+ logging.info(f"Metrics fetch completed in {execution_time:.2f} seconds")
+
+ return metrics
+
+ except Exception as e:
+ logging.error(f"Unexpected error in fetch_metrics: {e}")
+ return None
+
+ def get_ocean_data(self):
+ """
+ Get mining data from Ocean.xyz.
+
+ Returns:
+ OceanData: Ocean.xyz mining data
+ """
+ base_url = "https://ocean.xyz"
+ stats_url = f"{base_url}/stats/{self.wallet}"
+ headers = {
+ 'User-Agent': 'Mozilla/5.0',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+ 'Cache-Control': 'no-cache'
+ }
+
+ # Create an empty data object to populate
+ data = OceanData()
+
+ try:
+ response = self.session.get(stats_url, headers=headers, timeout=10)
+ if not response.ok:
+ logging.error(f"Error fetching ocean data: status code {response.status_code}")
+ return None
+
+ soup = BeautifulSoup(response.text, 'html.parser')
+
+ # Safely extract pool status information
+ try:
+ pool_status = soup.find("p", id="pool-status-item")
+ if pool_status:
+ text = pool_status.get_text(strip=True)
+ m_total = re.search(r'HASHRATE:\s*([\d\.]+)\s*(\w+/s)', text, re.IGNORECASE)
+ if m_total:
+ raw_val = float(m_total.group(1))
+ unit = m_total.group(2)
+ data.pool_total_hashrate = raw_val
+ data.pool_total_hashrate_unit = unit
+ span = pool_status.find("span", class_="pool-status-newline")
+ if span:
+ last_block_text = span.get_text(strip=True)
+ m_block = re.search(r'LAST BLOCK:\s*(\d+\s*\(.*\))', last_block_text, re.IGNORECASE)
+ if m_block:
+ full_last_block = m_block.group(1)
+ data.last_block = full_last_block
+ match = re.match(r'(\d+)\s*\((.*?)\)', full_last_block)
+ if match:
+ data.last_block_height = match.group(1)
+ data.last_block_time = match.group(2)
+ else:
+ data.last_block_height = full_last_block
+ data.last_block_time = ""
+ except Exception as e:
+ logging.error(f"Error parsing pool status: {e}")
+
+ # Parse the earnings value from the earnings table and convert to sats.
+ try:
+ earnings_table = soup.find('tbody', id='earnings-tablerows')
+ if earnings_table:
+ latest_row = earnings_table.find('tr', class_='table-row')
+ if latest_row:
+ cells = latest_row.find_all('td', class_='table-cell')
+ if len(cells) >= 3:
+ earnings_text = cells[2].get_text(strip=True)
+ earnings_value = earnings_text.replace('BTC', '').strip()
+ try:
+ btc_earnings = float(earnings_value)
+ sats = int(round(btc_earnings * 100000000))
+ data.last_block_earnings = str(sats)
+ except Exception:
+ data.last_block_earnings = earnings_value
+ except Exception as e:
+ logging.error(f"Error parsing earnings data: {e}")
+
+ # Parse hashrate data from the hashrates table
+ try:
+ time_mapping = {
+ '24 hrs': ('hashrate_24hr', 'hashrate_24hr_unit'),
+ '3 hrs': ('hashrate_3hr', 'hashrate_3hr_unit'),
+ '10 min': ('hashrate_10min', 'hashrate_10min_unit'),
+ '5 min': ('hashrate_5min', 'hashrate_5min_unit'),
+ '60 sec': ('hashrate_60sec', 'hashrate_60sec_unit')
+ }
+ hashrate_table = soup.find('tbody', id='hashrates-tablerows')
+ if hashrate_table:
+ for row in hashrate_table.find_all('tr', class_='table-row'):
+ cells = row.find_all('td', class_='table-cell')
+ if len(cells) >= 2:
+ period_text = cells[0].get_text(strip=True).lower()
+ hashrate_str = cells[1].get_text(strip=True).lower()
+ try:
+ parts = hashrate_str.split()
+ hashrate_val = float(parts[0])
+ unit = parts[1] if len(parts) > 1 else 'th/s'
+ for key, (attr, unit_attr) in time_mapping.items():
+ if key.lower() in period_text:
+ setattr(data, attr, hashrate_val)
+ setattr(data, unit_attr, unit)
+ break
+ except Exception as e:
+ logging.error(f"Error parsing hashrate '{hashrate_str}': {e}")
+ except Exception as e:
+ logging.error(f"Error parsing hashrate table: {e}")
+
+ # Parse lifetime stats data
+ try:
+ lifetime_snap = soup.find('div', id='lifetimesnap-statcards')
+ if lifetime_snap:
+ for container in lifetime_snap.find_all('div', class_='blocks dashboard-container'):
+ label_div = container.find('div', class_='blocks-label')
+ if label_div:
+ label_text = label_div.get_text(strip=True).lower()
+ earnings_span = label_div.find_next('span', class_=lambda x: x != 'tooltiptext')
+ if earnings_span:
+ span_text = earnings_span.get_text(strip=True)
+ try:
+ earnings_value = float(span_text.split()[0].replace(',', ''))
+ if "earnings" in label_text and "day" in label_text:
+ data.estimated_earnings_per_day = earnings_value
+ except Exception:
+ pass
+ except Exception as e:
+ logging.error(f"Error parsing lifetime stats: {e}")
+
+ # Parse payout stats data
+ try:
+ payout_snap = soup.find('div', id='payoutsnap-statcards')
+ if payout_snap:
+ for container in payout_snap.find_all('div', class_='blocks dashboard-container'):
+ label_div = container.find('div', class_='blocks-label')
+ if label_div:
+ label_text = label_div.get_text(strip=True).lower()
+ earnings_span = label_div.find_next('span', class_=lambda x: x != 'tooltiptext')
+ if earnings_span:
+ span_text = earnings_span.get_text(strip=True)
+ try:
+ earnings_value = float(span_text.split()[0].replace(',', ''))
+ if "earnings" in label_text and "block" in label_text:
+ data.estimated_earnings_next_block = earnings_value
+ elif "rewards" in label_text and "window" in label_text:
+ data.estimated_rewards_in_window = earnings_value
+ except Exception:
+ pass
+ except Exception as e:
+ logging.error(f"Error parsing payout stats: {e}")
+
+ # Parse user stats data
+ try:
+ usersnap = soup.find('div', id='usersnap-statcards')
+ if usersnap:
+ for container in usersnap.find_all('div', class_='blocks dashboard-container'):
+ label_div = container.find('div', class_='blocks-label')
+ if label_div:
+ label_text = label_div.get_text(strip=True).lower()
+ value_span = label_div.find_next('span', class_=lambda x: x != 'tooltiptext')
+ if value_span:
+ span_text = value_span.get_text(strip=True)
+ if "workers currently hashing" in label_text:
+ try:
+ data.workers_hashing = int(span_text.replace(",", ""))
+ except Exception:
+ pass
+ elif "unpaid earnings" in label_text and "btc" in span_text.lower():
+ try:
+ data.unpaid_earnings = float(span_text.split()[0].replace(',', ''))
+ except Exception:
+ pass
+ elif "estimated time until minimum payout" in label_text:
+ data.est_time_to_payout = span_text
+ except Exception as e:
+ logging.error(f"Error parsing user stats: {e}")
+
+ # Parse blocks found data
+ try:
+ blocks_container = soup.find(lambda tag: tag.name == "div" and "blocks found" in tag.get_text(strip=True).lower())
+ if blocks_container:
+ span = blocks_container.find_next_sibling("span")
+ if span:
+ num_match = re.search(r'(\d+)', span.get_text(strip=True))
+ if num_match:
+ data.blocks_found = num_match.group(1)
+ except Exception as e:
+ logging.error(f"Error parsing blocks found: {e}")
+
+ # Parse last share time data
+ try:
+ workers_table = soup.find("tbody", id="workers-tablerows")
+ if workers_table:
+ for row in workers_table.find_all("tr", class_="table-row"):
+ cells = row.find_all("td")
+ if cells and cells[0].get_text(strip=True).lower().startswith("total"):
+ last_share_str = cells[2].get_text(strip=True)
+ try:
+ naive_dt = datetime.strptime(last_share_str, "%Y-%m-%d %H:%M")
+ utc_dt = naive_dt.replace(tzinfo=ZoneInfo("UTC"))
+ la_dt = utc_dt.astimezone(ZoneInfo("America/Los_Angeles"))
+ data.total_last_share = la_dt.strftime("%Y-%m-%d %I:%M %p")
+ except Exception as e:
+ logging.error(f"Error converting last share time '{last_share_str}': {e}")
+ data.total_last_share = last_share_str
+ break
+ except Exception as e:
+ logging.error(f"Error parsing last share time: {e}")
+
+ return data
+ except Exception as e:
+ logging.error(f"Error fetching Ocean data: {e}")
+ return None
+
+ def fetch_url(self, url: str, timeout: int = 5):
+ """
+ Fetch URL with error handling.
+
+ Args:
+ url (str): URL to fetch
+ timeout (int): Timeout in seconds
+
+ Returns:
+ Response: Request response or None if failed
+ """
+ try:
+ return self.session.get(url, timeout=timeout)
+ except Exception as e:
+ logging.error(f"Error fetching {url}: {e}")
+ return None
+
+ def get_bitcoin_stats(self):
+ """
+ Fetch Bitcoin network statistics with improved error handling and caching.
+
+ Returns:
+ tuple: (difficulty, network_hashrate, btc_price, block_count)
+ """
+ urls = {
+ "difficulty": "https://blockchain.info/q/getdifficulty",
+ "hashrate": "https://blockchain.info/q/hashrate",
+ "ticker": "https://blockchain.info/ticker",
+ "blockcount": "https://blockchain.info/q/getblockcount"
+ }
+
+ # Use previous cached values as defaults if available
+ difficulty = self.cache.get("difficulty")
+ network_hashrate = self.cache.get("network_hashrate")
+ btc_price = self.cache.get("btc_price")
+ block_count = self.cache.get("block_count")
+
+ try:
+ with ThreadPoolExecutor(max_workers=4) as executor:
+ futures = {key: executor.submit(self.fetch_url, url) for key, url in urls.items()}
+ responses = {key: futures[key].result(timeout=5) for key in futures}
+
+ # Process each response individually with error handling
+ if responses["difficulty"] and responses["difficulty"].ok:
+ try:
+ difficulty = float(responses["difficulty"].text)
+ self.cache["difficulty"] = difficulty
+ except (ValueError, TypeError) as e:
+ logging.error(f"Error parsing difficulty: {e}")
+
+ if responses["hashrate"] and responses["hashrate"].ok:
+ try:
+ network_hashrate = float(responses["hashrate"].text) * 1e9
+ self.cache["network_hashrate"] = network_hashrate
+ except (ValueError, TypeError) as e:
+ logging.error(f"Error parsing network hashrate: {e}")
+
+ if responses["ticker"] and responses["ticker"].ok:
+ try:
+ ticker_data = responses["ticker"].json()
+ btc_price = float(ticker_data.get("USD", {}).get("last", btc_price))
+ self.cache["btc_price"] = btc_price
+ except (ValueError, TypeError, json.JSONDecodeError) as e:
+ logging.error(f"Error parsing BTC price: {e}")
+
+ if responses["blockcount"] and responses["blockcount"].ok:
+ try:
+ block_count = int(responses["blockcount"].text)
+ self.cache["block_count"] = block_count
+ except (ValueError, TypeError) as e:
+ logging.error(f"Error parsing block count: {e}")
+
+ except Exception as e:
+ logging.error(f"Error fetching Bitcoin stats: {e}")
+
+ return difficulty, network_hashrate, btc_price, block_count
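+
+# Example return value (hypothetical figures):
+#   get_bitcoin_stats() -> (121.9e12, 8.1e20, 83000.0, 895000)
+# i.e. (difficulty, network hashrate in H/s, BTC price in USD, block height);
+# any field that could not be fetched falls back to its cached value or None.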
diff --git a/dockerfile b/dockerfile
new file mode 100644
index 0000000..d8e4ade
--- /dev/null
+++ b/dockerfile
@@ -0,0 +1,66 @@
+FROM python:3.9-slim
+
+WORKDIR /app
+
+# Install curl for healthcheck and other dependencies
+RUN apt-get update && \
+ apt-get install -y --no-install-recommends curl && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/*
+
+# Install dependencies first to leverage Docker cache
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the application files
+COPY *.py .
+COPY config.json .
+COPY setup.py .
+
+# Create necessary directories
+RUN mkdir -p static/css static/js templates logs
+
+# Copy static files and templates
+COPY static/css/*.css static/css/
+COPY static/js/*.js static/js/
+COPY templates/*.html templates/
+
+# Run the setup script to ensure proper organization
+RUN python setup.py
+
+# Create a non-root user for better security
+RUN adduser --disabled-password --gecos '' appuser
+
+# Change ownership of the /app directory so appuser can write files
+RUN chown -R appuser:appuser /app
+
+# Create a directory for logs with proper permissions
+RUN mkdir -p /app/logs && chown -R appuser:appuser /app/logs
+
+# Switch to non-root user
+USER appuser
+
+# Expose the application port
+EXPOSE 5000
+
+# Set environment variables
+ENV FLASK_ENV=production
+ENV PYTHONUNBUFFERED=1
+
+# Add healthcheck
+HEALTHCHECK --interval=15s --timeout=5s --start-period=30s --retries=3 \
+ CMD curl -f http://localhost:5000/api/health || exit 1
+
+# Use Gunicorn as the production WSGI server
+CMD ["gunicorn", "-b", "0.0.0.0:5000", "App:app", \
+ "--workers=1", \
+ "--threads=12", \
+ "--timeout=600", \
+ "--keep-alive=5", \
+ "--log-level=info", \
+ "--access-logfile=-", \
+ "--error-logfile=-", \
+ "--log-file=-", \
+ "--graceful-timeout=60", \
+ "--worker-tmp-dir=/dev/shm"]
\ No newline at end of file
diff --git a/models.py b/models.py
new file mode 100644
index 0000000..b539083
--- /dev/null
+++ b/models.py
@@ -0,0 +1,59 @@
+"""
+Data models for the Bitcoin Mining Dashboard.
+"""
+import logging
+
+from dataclasses import dataclass
+
+@dataclass
+class OceanData:
+ """Data structure for Ocean.xyz pool mining data."""
+ pool_total_hashrate: float = None
+ pool_total_hashrate_unit: str = None
+ hashrate_24hr: float = None
+ hashrate_24hr_unit: str = None
+ hashrate_3hr: float = None
+ hashrate_3hr_unit: str = None
+ hashrate_10min: float = None
+ hashrate_10min_unit: str = None
+ hashrate_5min: float = None
+ hashrate_5min_unit: str = None
+ hashrate_60sec: float = None
+ hashrate_60sec_unit: str = None
+ estimated_earnings_per_day: float = None
+ estimated_earnings_next_block: float = None
+ estimated_rewards_in_window: float = None
+ workers_hashing: int = None
+ unpaid_earnings: float = None
+ est_time_to_payout: str = None
+ last_block: str = None
+ last_block_height: str = None
+ last_block_time: str = None
+ blocks_found: str = None
+ total_last_share: str = "N/A"
+ last_block_earnings: str = None
+
+def convert_to_ths(value: float, unit: str) -> float:
+ """
+ Convert any hashrate unit to TH/s equivalent.
+
+ Args:
+ value (float): The numerical value of the hashrate
+ unit (str): The unit of measurement (e.g., 'PH/s', 'EH/s', etc.)
+
+ Returns:
+ float: The hashrate value in TH/s
+ """
+ unit = unit.lower()
+ if 'ph/s' in unit:
+ return value * 1000 # 1 PH/s = 1000 TH/s
+ elif 'eh/s' in unit:
+ return value * 1000000 # 1 EH/s = 1,000,000 TH/s
+ elif 'gh/s' in unit:
+ return value / 1000 # 1 TH/s = 1000 GH/s
+ elif 'mh/s' in unit:
+ return value / 1000000 # 1 TH/s = 1,000,000 MH/s
+ elif 'th/s' in unit:
+ return value
+ else:
+ # Log unexpected unit
+ logging.warning(f"Unexpected hashrate unit: {unit}, defaulting to treating as TH/s")
+ return value
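+
+# Example conversions (values illustrative):
+#   convert_to_ths(2.5, 'PH/s')  -> 2500.0
+#   convert_to_ths(800, 'GH/s')  -> 0.8
+#   convert_to_ths(1.2, 'EH/s')  -> 1200000.0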
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..cb8facb
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,21 @@
+Flask==2.3.3
+requests==2.31.0
+beautifulsoup4==4.12.2
+Flask-Caching==2.1.0
+gunicorn==22.0.0
+htmlmin==0.1.12
+redis==5.0.1
+APScheduler==3.10.4
+psutil==5.9.5
+Werkzeug==2.3.7
+Jinja2==3.1.2
+itsdangerous==2.1.2
+MarkupSafe==2.1.3
+soupsieve==2.5
+tzdata==2023.3
+pytz==2023.3
+tzlocal==5.0.1
+urllib3==2.0.7
+idna==3.4
+certifi==2023.7.22
+six==1.16.0
\ No newline at end of file
diff --git a/state_manager.py b/state_manager.py
new file mode 100644
index 0000000..c5faf05
--- /dev/null
+++ b/state_manager.py
@@ -0,0 +1,379 @@
+"""
+State manager module for handling persistent state and history.
+"""
+import logging
+import json
+import time
+import gc
+import threading
+import redis
+
+# Global variables for arrow history, legacy hashrate history, and a log of full metrics snapshots.
+arrow_history = {} # stored per second
+hashrate_history = []
+metrics_log = []
+
+# Limits for data collections to prevent memory growth
+MAX_HISTORY_ENTRIES = 180 # 3 hours worth at 1 min intervals
+
+# Lock for thread safety
+state_lock = threading.Lock()
+
+class StateManager:
+ """Manager for persistent state and history data."""
+
+ def __init__(self, redis_url=None):
+ """
+ Initialize the state manager.
+
+ Args:
+ redis_url (str, optional): Redis URL for persistent storage
+ """
+ self.redis_client = self._connect_to_redis(redis_url) if redis_url else None
+ self.STATE_KEY = "graph_state"
+ self.last_save_time = 0
+
+ # Load state if available
+ self.load_graph_state()
+
+ def _connect_to_redis(self, redis_url):
+ """
+ Connect to Redis with retry logic.
+
+ Args:
+ redis_url (str): Redis URL
+
+ Returns:
+ redis.Redis: Redis client or None if connection failed
+ """
+ if not redis_url:
+ logging.info("Redis URL not configured, using in-memory state only.")
+ return None
+
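+        # Expected URL form (illustrative): redis://[:password@]host:port/db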
+ retry_count = 0
+ max_retries = 3
+
+ while retry_count < max_retries:
+ try:
+ client = redis.Redis.from_url(redis_url)
+ client.ping() # Test the connection
+ logging.info(f"Connected to Redis at {redis_url}")
+ return client
+ except Exception as e:
+ retry_count += 1
+ if retry_count < max_retries:
+ logging.warning(f"Redis connection attempt {retry_count} failed: {e}. Retrying...")
+ time.sleep(1) # Wait before retrying
+ else:
+ logging.error(f"Could not connect to Redis after {max_retries} attempts: {e}")
+ return None
+
+ def load_graph_state(self):
+ """Load graph state from Redis with support for the optimized format."""
+ global arrow_history, hashrate_history, metrics_log
+
+ if not self.redis_client:
+ logging.info("Redis not available, using in-memory state.")
+ return
+
+ try:
+ # Check version to handle format changes
+ version = self.redis_client.get(f"{self.STATE_KEY}_version")
+ version = version.decode('utf-8') if version else "1.0"
+
+ state_json = self.redis_client.get(self.STATE_KEY)
+ if state_json:
+ state = json.loads(state_json)
+
+ # Handle different versions of the data format
+ if version == "2.0": # Optimized format
+ # Restore arrow_history
+ compact_arrow_history = state.get("arrow_history", {})
+ for key, values in compact_arrow_history.items():
+ arrow_history[key] = [
+ {"time": entry.get("t", ""),
+ "value": entry.get("v", 0),
+ "arrow": ""} # Default empty arrow
+ for entry in values
+ ]
+
+ # Restore hashrate_history
+ hashrate_history = state.get("hashrate_history", [])
+
+ # Restore metrics_log
+ compact_metrics_log = state.get("metrics_log", [])
+ metrics_log = []
+ for entry in compact_metrics_log:
+ metrics_log.append({
+ "timestamp": entry.get("ts", ""),
+ "metrics": entry.get("m", {})
+ })
+ else: # Original format
+ arrow_history = state.get("arrow_history", {})
+ hashrate_history = state.get("hashrate_history", [])
+ metrics_log = state.get("metrics_log", [])
+
+ logging.info(f"Loaded graph state from Redis (format version {version}).")
+ else:
+ logging.info("No previous graph state found in Redis.")
+ except Exception as e:
+ logging.error(f"Error loading graph state from Redis: {e}")
+
+ def save_graph_state(self):
+ """Save graph state to Redis with optimized frequency, pruning, and data reduction."""
+ if not self.redis_client:
+ logging.info("Redis not available, skipping state save.")
+ return
+
+        # Rate-limit saves: at most once every 5 minutes
+        # (last_save_time is always initialized in __init__, so no hasattr check is needed)
+        current_time = time.time()
+        if current_time - self.last_save_time < 300:
+            logging.debug("Skipping Redis save - last save was less than 5 minutes ago")
+            return
+
+ # Update the last save time
+ self.last_save_time = current_time
+
+ # Prune data first to reduce volume
+ self.prune_old_data()
+
+ # Create compact versions of the data structures for Redis storage
+ try:
+ # 1. Create compact arrow_history with minimal data
+ compact_arrow_history = {}
+ for key, values in arrow_history.items():
+ if isinstance(values, list) and values:
+ # Only store recent history (last 2 hours)
+ recent_values = values[-120:] if len(values) > 120 else values
+ # Use shorter field names and remove unnecessary fields
+ compact_arrow_history[key] = [
+ {"t": entry["time"], "v": entry["value"]}
+ for entry in recent_values
+ ]
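+                    # e.g. {"time": "14:32:05", "value": 524.3, "arrow": "↑"}
+                    #      -> {"t": "14:32:05", "v": 524.3} (illustrative values)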
+
+ # 2. Only keep essential hashrate_history
+ compact_hashrate_history = hashrate_history[-60:] if len(hashrate_history) > 60 else hashrate_history
+
+ # 3. Only keep recent metrics_log entries (last 30 minutes)
+ # This is typically the largest data structure
+ compact_metrics_log = []
+ if metrics_log:
+ # Keep only last 30 entries (30 minutes assuming 1-minute updates)
+ recent_logs = metrics_log[-30:]
+
+ for entry in recent_logs:
+ # Only keep necessary fields from each metrics entry
+ if "metrics" in entry and "timestamp" in entry:
+ metrics_copy = {}
+ original_metrics = entry["metrics"]
+
+ # Only copy the most important metrics for historical tracking
+ essential_keys = [
+ "hashrate_60sec", "hashrate_24hr", "btc_price",
+ "workers_hashing", "unpaid_earnings", "difficulty",
+ "network_hashrate", "daily_profit_usd"
+ ]
+
+ for key in essential_keys:
+ if key in original_metrics:
+ metrics_copy[key] = original_metrics[key]
+
+ # Skip arrow_history within metrics as we already stored it separately
+ compact_metrics_log.append({
+ "ts": entry["timestamp"],
+ "m": metrics_copy
+ })
+
+ # Create the final state object
+ state = {
+ "arrow_history": compact_arrow_history,
+ "hashrate_history": compact_hashrate_history,
+ "metrics_log": compact_metrics_log
+ }
+
+ # Convert to JSON once to reuse and measure size
+ state_json = json.dumps(state)
+ data_size_kb = len(state_json) / 1024
+
+ # Log data size for monitoring
+ logging.info(f"Saving graph state to Redis: {data_size_kb:.2f} KB (optimized format)")
+
+ # Only save if data size is reasonable (adjust threshold as needed)
+ if data_size_kb > 2000: # 2MB warning threshold (reduced from 5MB)
+ logging.warning(f"Redis save data size is still large: {data_size_kb:.2f} KB")
+
+ # Store version info to handle future format changes
+ self.redis_client.set(f"{self.STATE_KEY}_version", "2.0")
+ self.redis_client.set(self.STATE_KEY, state_json)
+ logging.info(f"Successfully saved graph state to Redis ({data_size_kb:.2f} KB)")
+ except Exception as e:
+ logging.error(f"Error saving graph state to Redis: {e}")
+
+ def prune_old_data(self):
+ """Remove old data to prevent memory growth with optimized strategy."""
+ global arrow_history, metrics_log
+
+ with state_lock:
+ # Prune arrow_history with more sophisticated approach
+            for key in arrow_history:
+                if isinstance(arrow_history[key], list) and len(arrow_history[key]) > MAX_HISTORY_ENTRIES:
+                    original_length = len(arrow_history[key])
+
+                    # Keep the most recent data (last hour) at full resolution
+                    recent_data = arrow_history[key][-60:]
+
+                    # For older data, halve the resolution by keeping every other point
+                    older_data = arrow_history[key][:-60]
+                    if older_data:
+                        sparse_older_data = [older_data[i] for i in range(0, len(older_data), 2)]
+                        arrow_history[key] = sparse_older_data + recent_data
+                    else:
+                        arrow_history[key] = recent_data
+
+                    # Capture the length before pruning so the log reflects the actual reduction
+                    logging.info(f"Pruned {key} history from {original_length} to {len(arrow_history[key])} entries")
+
+ # Prune metrics_log more aggressively
+            if len(metrics_log) > MAX_HISTORY_ENTRIES:
+                original_length = len(metrics_log)
+
+                # Keep the most recent entries at full resolution
+                recent_logs = metrics_log[-60:]
+
+                # Reduce resolution of older entries by keeping every 3rd entry
+                older_logs = metrics_log[:-60]
+                if older_logs:
+                    sparse_older_logs = [older_logs[i] for i in range(0, len(older_logs), 3)]
+                    metrics_log = sparse_older_logs + recent_logs
+                    logging.info(f"Pruned metrics log from {original_length} to {len(metrics_log)} entries")
+
+ # Free memory more aggressively
+ gc.collect()
+
+ def persist_critical_state(self, cached_metrics, scheduler_last_successful_run, last_metrics_update_time):
+ """
+ Store critical state in Redis for recovery after worker restarts.
+
+ Args:
+ cached_metrics (dict): Current metrics
+ scheduler_last_successful_run (float): Timestamp of last successful scheduler run
+ last_metrics_update_time (float): Timestamp of last metrics update
+ """
+ if not self.redis_client:
+ return
+
+ try:
+ # Only persist if we have valid data
+ if cached_metrics and cached_metrics.get("server_timestamp"):
+ state = {
+ "cached_metrics_timestamp": cached_metrics.get("server_timestamp"),
+ "last_successful_run": scheduler_last_successful_run,
+ "last_update_time": last_metrics_update_time
+ }
+ self.redis_client.set("critical_state", json.dumps(state))
+ logging.info(f"Persisted critical state to Redis, timestamp: {cached_metrics.get('server_timestamp')}")
+ except Exception as e:
+ logging.error(f"Error persisting critical state: {e}")
+
+ def load_critical_state(self):
+ """
+ Recover critical state variables after a worker restart.
+
+ Returns:
+ tuple: (last_successful_run, last_update_time)
+ """
+ if not self.redis_client:
+ return None, None
+
+ try:
+ state_json = self.redis_client.get("critical_state")
+ if state_json:
+ state = json.loads(state_json.decode('utf-8'))
+ last_successful_run = state.get("last_successful_run")
+ last_update_time = state.get("last_update_time")
+
+ logging.info(f"Loaded critical state from Redis, last run: {last_successful_run}")
+
+ # We don't restore cached_metrics itself, as we'll fetch fresh data
+ # Just note that we have state to recover from
+ logging.info(f"Last metrics timestamp from Redis: {state.get('cached_metrics_timestamp')}")
+
+ return last_successful_run, last_update_time
+ except Exception as e:
+ logging.error(f"Error loading critical state: {e}")
+
+ return None, None
+
+ def update_metrics_history(self, metrics):
+ """
+ Update history collections with new metrics data.
+
+ Args:
+ metrics (dict): New metrics data
+ """
+ global arrow_history, hashrate_history, metrics_log
+
+ # Skip if metrics is None
+ if not metrics:
+ return
+
+ arrow_keys = [
+ "pool_total_hashrate", "hashrate_24hr", "hashrate_3hr", "hashrate_10min",
+ "hashrate_60sec", "block_number", "btc_price", "network_hashrate",
+ "difficulty", "daily_revenue", "daily_power_cost", "daily_profit_usd",
+ "monthly_profit_usd", "daily_mined_sats", "monthly_mined_sats", "unpaid_earnings",
+ "estimated_earnings_per_day_sats", "estimated_earnings_next_block_sats", "estimated_rewards_in_window_sats",
+ "workers_hashing"
+ ]
+
+ # --- Bucket by second (Los Angeles Time) with thread safety ---
+ from datetime import datetime
+ from zoneinfo import ZoneInfo
+
+ current_second = datetime.now(ZoneInfo("America/Los_Angeles")).strftime("%H:%M:%S")
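+    # e.g. "14:32:05" - a seconds-resolution bucket key in Los Angeles time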
+
+ with state_lock:
+ for key in arrow_keys:
+ if metrics.get(key) is not None:
+ current_val = metrics[key]
+ arrow = ""
+ if key in arrow_history and arrow_history[key]:
+ try:
+ previous_val = arrow_history[key][-1]["value"]
+ if current_val > previous_val:
+ arrow = "↑"
+ elif current_val < previous_val:
+ arrow = "↓"
+ except Exception as e:
+ logging.error(f"Error calculating arrow for {key}: {e}")
+
+ if key not in arrow_history:
+ arrow_history[key] = []
+
+ if not arrow_history[key] or arrow_history[key][-1]["time"] != current_second:
+ arrow_history[key].append({
+ "time": current_second,
+ "value": current_val,
+ "arrow": arrow
+ })
+ else:
+ arrow_history[key][-1]["value"] = current_val
+ arrow_history[key][-1]["arrow"] = arrow
+
+ # Cap history to three hours worth (180 entries)
+ if len(arrow_history[key]) > MAX_HISTORY_ENTRIES:
+ arrow_history[key] = arrow_history[key][-MAX_HISTORY_ENTRIES:]
+
+ # --- Aggregate arrow_history by minute for the graph ---
+ aggregated_history = {}
+ for key, entries in arrow_history.items():
+ minute_groups = {}
+ for entry in entries:
+ minute = entry["time"][:5] # extract HH:MM
+ minute_groups[minute] = entry # take last entry for that minute
+ aggregated_history[key] = list(minute_groups.values())
+ metrics["arrow_history"] = aggregated_history
+ metrics["history"] = hashrate_history
+
+ entry = {"timestamp": datetime.now().isoformat(), "metrics": metrics}
+ metrics_log.append(entry)
+ # Cap the metrics log to three hours worth (180 entries)
+ if len(metrics_log) > MAX_HISTORY_ENTRIES:
+ metrics_log = metrics_log[-MAX_HISTORY_ENTRIES:]
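+
+# Illustrative shapes of the collections maintained above (values are examples):
+#   arrow_history["hashrate_60sec"] -> [{"time": "14:32:05", "value": 524.3, "arrow": "↑"}, ...]
+#   metrics_log                     -> [{"timestamp": "2025-01-01T14:32:05", "metrics": {...}}, ...]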
diff --git a/static/css/boot.css b/static/css/boot.css
new file mode 100644
index 0000000..fb2be34
--- /dev/null
+++ b/static/css/boot.css
@@ -0,0 +1,215 @@
+/* Base Styles with a subtle radial background for extra depth */
+body {
+ background: linear-gradient(135deg, #121212, #000000);
+ color: #f7931a;
+ font-family: 'VT323', monospace;
+ font-size: 20px;
+ line-height: 1.4;
+ margin: 0;
+ padding: 10px;
+ overflow-x: hidden;
+ text-shadow: 0 0 5px rgba(247, 147, 26, 0.4);
+ height: calc(100vh - 100px);
+ display: flex;
+ flex-direction: column;
+}
+
+/* CRT Screen Effect */
+body::before {
+ content: " ";
+ display: block;
+ position: fixed;
+ top: 0; left: 0; bottom: 0; right: 0;
+ background: linear-gradient(rgba(18, 16, 16, 0) 50%, rgba(0, 0, 0, 0.1) 50%),
+ linear-gradient(90deg, rgba(255, 0, 0, 0.03), rgba(0, 255, 0, 0.02), rgba(0, 0, 255, 0.03));
+ background-size: 100% 2px, 3px 100%;
+ pointer-events: none;
+ z-index: 2;
+ opacity: 0.15;
+}
+
+/* Flicker Animation */
+@keyframes flicker {
+ 0% { opacity: 0.97; }
+ 5% { opacity: 0.95; }
+ 10% { opacity: 0.97; }
+ 15% { opacity: 0.94; }
+ 20% { opacity: 0.98; }
+ 50% { opacity: 0.95; }
+ 80% { opacity: 0.96; }
+ 90% { opacity: 0.94; }
+ 100% { opacity: 0.98; }
+}
+
+/* Terminal Window with scrolling enabled */
+#terminal {
+ width: 100%;
+ max-width: 900px;
+ margin: 0 auto;
+ white-space: pre-wrap;
+ word-break: break-word;
+ animation: flicker 4s infinite;
+ height: 400px;
+ overflow-y: auto;
+ position: relative;
+ flex: 1;
+}
+
+#terminal-content {
+ position: absolute;
+ bottom: 0;
+ width: 100%;
+}
+
+.cursor {
+ display: inline-block;
+ width: 10px;
+ height: 16px;
+ background-color: #f7931a;
+ animation: blink 1s step-end infinite;
+ vertical-align: middle;
+ box-shadow: 0 0 5px rgba(247, 147, 26, 0.8);
+}
+
+@keyframes blink {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0; }
+}
+
+/* Neon-inspired color classes */
+.green {
+ color: #39ff14 !important;
+ text-shadow: 0 0 10px #39ff14, 0 0 20px #39ff14;
+}
+
+.blue {
+ color: #00dfff !important;
+ text-shadow: 0 0 10px #00dfff, 0 0 20px #00dfff;
+}
+
+.yellow {
+ color: #ffd700 !important;
+ text-shadow: 0 0 8px #ffd700, 0 0 16px #ffd700;
+}
+
+.white {
+ color: #ffffff !important;
+ text-shadow: 0 0 8px #ffffff, 0 0 16px #ffffff;
+}
+
+.red {
+ color: #ff2d2d !important;
+ text-shadow: 0 0 10px #ff2d2d, 0 0 20px #ff2d2d;
+}
+
+.magenta {
+ color: #ff2d95 !important;
+ text-shadow: 0 0 10px #ff2d95, 0 0 20px #ff2d95;
+}
+
+/* Bitcoin Logo styling with extra neon border */
+#bitcoin-logo {
+ display: block;
+ visibility: hidden;
+ text-align: center;
+ margin: 10px auto;
+ font-size: 10px;
+ line-height: 1;
+ color: #f7931a;
+ text-shadow: 0 0 10px rgba(247, 147, 26, 0.8);
+ white-space: pre;
+ width: 260px;
+ padding: 10px;
+ border: 2px solid #f7931a;
+ background-color: #0a0a0a;
+ box-shadow: 0 0 15px rgba(247, 147, 26, 0.5);
+ font-family: monospace;
+ opacity: 0;
+ transition: opacity 1s ease;
+}
+
+/* Skip Button */
+#skip-button {
+ position: fixed;
+ bottom: 20px;
+ right: 20px;
+ background-color: #f7931a;
+ color: #000;
+ border: none;
+ padding: 10px 15px;
+ border-radius: 5px;
+ cursor: pointer;
+ font-family: 'VT323', monospace;
+ font-size: 16px;
+ box-shadow: 0 0 8px rgba(247, 147, 26, 0.5);
+ transition: all 0.2s ease;
+}
+
+#skip-button:hover {
+ background-color: #ffa32e;
+ box-shadow: 0 0 12px rgba(247, 147, 26, 0.7);
+}
+
+/* Prompt Styling */
+#prompt-container {
+ display: none;
+ white-space: nowrap;
+}
+
+#prompt-text {
+ color: #f7931a;
+ margin-right: 5px;
+ text-shadow: 0 0 5px rgba(247, 147, 26, 0.8);
+ display: inline;
+}
+
+#user-input {
+ background: transparent;
+ border: none;
+ color: #f7931a;
+ font-family: 'VT323', monospace;
+ font-size: 20px;
+ caret-color: transparent;
+ outline: none;
+ width: 20px;
+ padding: 0;
+ margin: 0;
+ text-shadow: 0 0 5px rgba(247, 147, 26, 0.8);
+ display: inline-block;
+ vertical-align: top;
+}
+
+.prompt-cursor {
+ display: inline-block;
+ width: 10px;
+ height: 16px;
+ background-color: #f7931a;
+ animation: blink 1s step-end infinite;
+ vertical-align: middle;
+ box-shadow: 0 0 5px rgba(247, 147, 26, 0.8);
+ position: relative;
+ top: 1px;
+ margin-left: -2px;
+}
+
+/* Mobile Responsiveness */
+@media (max-width: 600px) {
+ body { font-size: 14px; padding: 10px; }
+ #terminal { margin: 0; }
+}
+
+/* Loading and Debug Info */
+#loading-message {
+ text-align: center;
+ margin-bottom: 10px;
+ text-shadow: 0 0 5px rgba(247, 147, 26, 0.8);
+}
+
+#debug-info {
+ position: fixed;
+ bottom: 10px;
+ left: 10px;
+ color: #666;
+ font-size: 12px;
+ z-index: 100;
+}
diff --git a/static/css/common.css b/static/css/common.css
new file mode 100644
index 0000000..1456e94
--- /dev/null
+++ b/static/css/common.css
@@ -0,0 +1,421 @@
+/* Common styling elements shared across all pages */
+:root {
+ --bg-color: #0a0a0a;
+ --bg-gradient: linear-gradient(135deg, #0a0a0a, #1a1a1a);
+ --primary-color: #f7931a;
+ --accent-color: #00ffff;
+ --text-color: #ffffff;
+ --card-padding: 0.5rem;
+ --text-size-base: 16px;
+ --terminal-font: 'VT323', monospace;
+ --header-font: 'Orbitron', sans-serif;
+}
+
+@media (min-width: 768px) {
+ :root {
+ --card-padding: 0.75rem;
+ --text-size-base: 18px;
+ }
+}
+
+/* CRT Screen Effect */
+body::before {
+ content: " ";
+ display: block;
+ position: fixed;
+ top: 0; left: 0; bottom: 0; right: 0;
+ background: linear-gradient(rgba(18, 16, 16, 0) 50%, rgba(0, 0, 0, 0.1) 50%),
+ linear-gradient(90deg, rgba(255, 0, 0, 0.03), rgba(0, 255, 0, 0.02), rgba(0, 0, 255, 0.03));
+ background-size: 100% 2px, 3px 100%;
+ pointer-events: none;
+ z-index: 2;
+ opacity: 0.15;
+}
+
+/* Flicker Animation */
+@keyframes flicker {
+ 0% { opacity: 0.97; }
+ 5% { opacity: 0.95; }
+ 10% { opacity: 0.97; }
+ 15% { opacity: 0.94; }
+ 20% { opacity: 0.98; }
+ 50% { opacity: 0.95; }
+ 80% { opacity: 0.96; }
+ 90% { opacity: 0.94; }
+ 100% { opacity: 0.98; }
+}
+
+body {
+ background: var(--bg-gradient);
+ color: var(--text-color);
+ padding-top: 0.5rem;
+ font-size: var(--text-size-base);
+ font-family: var(--terminal-font);
+ text-shadow: 0 0 5px rgba(255, 255, 255, 0.3);
+}
+
+h1 {
+ font-size: 24px;
+ font-weight: bold;
+ color: var(--primary-color);
+ font-family: var(--header-font);
+ letter-spacing: 1px;
+ text-shadow: 0 0 10px var(--primary-color);
+ animation: flicker 4s infinite;
+}
+
+@media (min-width: 768px) {
+ h1 {
+ font-size: 26px;
+ }
+}
+
+/* Navigation links */
+.navigation-links {
+ display: flex;
+ justify-content: center;
+ margin-top: 10px;
+ margin-bottom: 15px;
+}
+
+.nav-link {
+ padding: 5px 15px;
+ margin: 0 10px;
+ background-color: var(--bg-color);
+ border: 1px solid var(--primary-color);
+ color: var(--primary-color);
+ text-decoration: none;
+ font-family: var(--terminal-font);
+ transition: all 0.3s ease;
+}
+
+.nav-link:hover {
+ background-color: var(--primary-color);
+ color: var(--bg-color);
+ box-shadow: 0 0 10px rgba(247, 147, 26, 0.5);
+}
+
+.nav-link.active {
+ background-color: var(--primary-color);
+ color: var(--bg-color);
+ box-shadow: 0 0 10px rgba(247, 147, 26, 0.5);
+}
+
+/* Top right link */
+#topRightLink {
+ position: absolute;
+ top: 10px;
+ right: 10px;
+ color: grey;
+ text-decoration: none;
+ font-size: 0.9rem;
+ text-shadow: 0 0 5px grey;
+}
+
+/* Card styles */
+.card,
+.card-header,
+.card-body,
+.card-footer {
+ border-radius: 0 !important;
+}
+
+/* Enhanced card with scanlines */
+.card {
+ background-color: var(--bg-color);
+ border: 1px solid var(--primary-color);
+ margin-bottom: 0.5rem;
+ padding: var(--card-padding);
+ flex: 1;
+ position: relative;
+ overflow: hidden;
+ box-shadow: 0 0 5px rgba(247, 147, 26, 0.3);
+}
+
+/* Scanline effect for cards */
+.card::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: repeating-linear-gradient(
+ 0deg,
+ rgba(0, 0, 0, 0.05),
+ rgba(0, 0, 0, 0.05) 1px,
+ transparent 1px,
+ transparent 2px
+ );
+ pointer-events: none;
+ z-index: 1;
+}
+
+.card-header {
+ background-color: #000;
+ color: var(--primary-color);
+ font-weight: bold;
+ padding: 0.3rem 0.5rem;
+ font-size: 1.1rem;
+ border-bottom: 1px solid var(--primary-color);
+ text-shadow: 0 0 5px var(--primary-color);
+ animation: flicker 4s infinite;
+ font-family: var(--header-font);
+}
+
+.card-body hr {
+ border-top: 1px solid var(--primary-color);
+ margin: 0.25rem 0;
+}
+
+/* Connection status indicator */
+#connectionStatus {
+ display: none;
+ position: fixed;
+ top: 10px;
+ right: 10px;
+ background: rgba(255,0,0,0.7);
+ color: white;
+ padding: 10px;
+ border-radius: 5px;
+ z-index: 9999;
+ font-size: 0.9rem;
+ text-shadow: 0 0 5px rgba(255, 0, 0, 0.8);
+ box-shadow: 0 0 10px rgba(255, 0, 0, 0.5);
+}
+
+/* Last Updated text with subtle animation */
+#lastUpdated {
+ animation: flicker 5s infinite;
+ text-align: center;
+}
+
+/* Cursor blink for terminal feel */
+#terminal-cursor {
+ display: inline-block;
+ width: 10px;
+ height: 16px;
+ background-color: #f7931a;
+ margin-left: 2px;
+ animation: blink 1s step-end infinite;
+ vertical-align: middle;
+ box-shadow: 0 0 5px rgba(247, 147, 26, 0.8);
+}
+
+@keyframes blink {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0; }
+}
+
+/* Container */
+.container-fluid {
+ max-width: 1200px;
+ margin: 0 auto;
+ padding-left: 1rem;
+ padding-right: 1rem;
+ position: relative;
+}
+
+/* Status indicators */
+.online-dot {
+ display: inline-block;
+ width: 8px;
+ height: 8px;
+ background: #32CD32;
+ border-radius: 50%;
+ margin-left: 0.5em;
+ position: relative;
+ top: -2px;
+ animation: glow 1s infinite;
+ box-shadow: 0 0 10px #32CD32, 0 0 20px #32CD32;
+}
+
+@keyframes glow {
+ 0%, 100% { box-shadow: 0 0 10px #32CD32, 0 0 15px #32CD32; }
+ 50% { box-shadow: 0 0 15px #32CD32, 0 0 25px #32CD32; }
+}
+
+.offline-dot {
+ display: inline-block;
+ width: 8px;
+ height: 8px;
+ background: red;
+ border-radius: 50%;
+ margin-left: 0.5em;
+ animation: glowRed 1s infinite;
+ box-shadow: 0 0 10px red, 0 0 20px red;
+}
+
+@keyframes glowRed {
+ 0%, 100% { box-shadow: 0 0 10px red, 0 0 15px red; }
+ 50% { box-shadow: 0 0 15px red, 0 0 25px red; }
+}
+
+/* Color utility classes */
+.green-glow, .status-green {
+ color: #39ff14 !important;
+ text-shadow: 0 0 10px #39ff14, 0 0 20px #39ff14;
+}
+
+.red-glow, .status-red {
+ color: #ff2d2d !important;
+ text-shadow: 0 0 10px #ff2d2d, 0 0 20px #ff2d2d;
+}
+
+.yellow-glow {
+ color: #ffd700 !important;
+ text-shadow: 0 0 6px #ffd700, 0 0 12px #ffd700;
+}
+
+.blue-glow {
+ color: #00dfff !important;
+ text-shadow: 0 0 6px #00dfff, 0 0 12px #00dfff;
+}
+
+.white-glow {
+ color: #ffffff !important;
+ text-shadow: 0 0 6px #ffffff, 0 0 12px #ffffff;
+}
+
+/* Basic color classes for backward compatibility */
+.green {
+ color: #39ff14 !important;
+ text-shadow: 0 0 10px #39ff14, 0 0 20px #39ff14;
+}
+
+.blue {
+ color: #00dfff !important;
+ text-shadow: 0 0 10px #00dfff, 0 0 20px #00dfff;
+}
+
+.yellow {
+ color: #ffd700 !important;
+ text-shadow: 0 0 8px #ffd700, 0 0 16px #ffd700;
+}
+
+.white {
+ color: #ffffff !important;
+ text-shadow: 0 0 8px #ffffff, 0 0 16px #ffffff;
+}
+
+.red {
+ color: #ff2d2d !important;
+ text-shadow: 0 0 10px #ff2d2d, 0 0 20px #ff2d2d;
+}
+
+.magenta {
+ color: #ff2d95 !important;
+ text-shadow: 0 0 10px #ff2d95, 0 0 20px #ff2d95;
+}
+
+/* Bitcoin Progress Bar Styles */
+.bitcoin-progress-container {
+ width: 100%;
+ max-width: 300px;
+ height: 20px;
+ background-color: #111;
+ border: 1px solid var(--primary-color);
+ border-radius: 0;
+ margin: 0.5rem auto;
+ position: relative;
+ overflow: hidden;
+ box-shadow: 0 0 8px rgba(247, 147, 26, 0.5);
+ align-self: center;
+}
+
+.bitcoin-progress-inner {
+ height: 100%;
+ width: 0;
+ background: linear-gradient(90deg, #f7931a, #ffa500);
+ border-radius: 0;
+ transition: width 0.3s ease;
+ position: relative;
+ overflow: hidden;
+}
+
+.bitcoin-progress-inner::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: linear-gradient(90deg,
+ rgba(255, 255, 255, 0.1) 0%,
+ rgba(255, 255, 255, 0.2) 20%,
+ rgba(255, 255, 255, 0.1) 40%);
+ animation: shimmer 2s infinite;
+}
+
+@keyframes shimmer {
+ 0% { transform: translateX(-100%); }
+ 100% { transform: translateX(100%); }
+}
+
+.bitcoin-icons {
+ position: absolute;
+ top: 50%;
+ left: 0;
+ width: 100%;
+ transform: translateY(-50%);
+ display: flex;
+ justify-content: space-around;
+ font-size: 12px;
+ color: rgba(0, 0, 0, 0.7);
+}
+
+.glow-effect {
+ box-shadow: 0 0 15px #f7931a, 0 0 25px #f7931a;
+ animation: pulse 1s infinite;
+}
+
+/* Extra styling for when server update is late */
+.waiting-for-update {
+ animation: waitingPulse 2s infinite !important;
+}
+
+@keyframes waitingPulse {
+ 0%, 100% { box-shadow: 0 0 10px #f7931a, 0 0 15px #f7931a; opacity: 0.8; }
+ 50% { box-shadow: 0 0 20px #f7931a, 0 0 35px #f7931a; opacity: 1; }
+}
+
+@keyframes pulse {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0.8; }
+}
+
+#progress-text {
+ font-size: 1rem;
+ color: var(--primary-color);
+ margin-top: 0.3rem;
+ text-shadow: 0 0 5px var(--primary-color);
+ text-align: center;
+ width: 100%;
+}
+
+/* Mobile responsiveness */
+@media (max-width: 576px) {
+ .container-fluid {
+ padding-left: 0.5rem;
+ padding-right: 0.5rem;
+ }
+
+ .card-body {
+ padding: 0.5rem;
+ }
+
+ h1 {
+ font-size: 22px;
+ }
+
+ .card-header {
+ font-size: 1rem;
+ }
+
+ #topRightLink {
+ position: static;
+ display: block;
+ text-align: right;
+ margin-bottom: 0.5rem;
+ }
+}
diff --git a/static/css/dashboard.css b/static/css/dashboard.css
new file mode 100644
index 0000000..7c59c8f
--- /dev/null
+++ b/static/css/dashboard.css
@@ -0,0 +1,190 @@
+/* Specific styles for the main dashboard */
+
+#graphContainer {
+ background-color: #000;
+ padding: 0.5rem;
+ margin-bottom: 1rem;
+ height: 230px;
+ border: 1px solid var(--primary-color);
+ box-shadow: 0 0 10px rgba(247, 147, 26, 0.2);
+ position: relative;
+}
+
+/* Add scanline effect to graph */
+#graphContainer::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: repeating-linear-gradient(
+ 0deg,
+ rgba(0, 0, 0, 0.1),
+ rgba(0, 0, 0, 0.1) 1px,
+ transparent 1px,
+ transparent 2px
+ );
+ pointer-events: none;
+ z-index: 1;
+}
+
+/* Override for Payout & Misc card */
+#payoutMiscCard {
+ margin-bottom: 0.5rem;
+}
+
+/* Row equal height for card alignment */
+.row.equal-height {
+ display: flex;
+ flex-wrap: wrap;
+ margin-bottom: 1rem;
+}
+
+.row.equal-height > [class*="col-"] {
+ display: flex;
+ margin-bottom: 0.5rem;
+}
+
+.row.equal-height > [class*="col-"] .card {
+ width: 100%;
+}
+
+/* Arrow indicator styles */
+.arrow {
+ display: inline-block;
+ font-weight: bold;
+ margin-left: 0.5rem;
+}
+
+/* Bounce animations for indicators */
+@keyframes bounceUp {
+ 0% { transform: translateY(0); }
+ 25% { transform: translateY(-2px); }
+ 50% { transform: translateY(0); }
+ 75% { transform: translateY(-2px); }
+ 100% { transform: translateY(0); }
+}
+
+@keyframes bounceDown {
+ 0% { transform: translateY(0); }
+ 25% { transform: translateY(2px); }
+ 50% { transform: translateY(0); }
+ 75% { transform: translateY(2px); }
+ 100% { transform: translateY(0); }
+}
+
+.bounce-up {
+ animation: bounceUp 1s infinite;
+}
+
+.bounce-down {
+ animation: bounceDown 1s infinite;
+}
+
+.chevron {
+ font-size: 0.8rem;
+ position: relative;
+ top: 3px;
+}
+
+/* Refresh timer container */
+#refreshUptime {
+ text-align: center;
+ margin-top: 0.5rem;
+}
+
+#refreshContainer {
+ display: flex;
+ flex-direction: column;
+ justify-content: center;
+ align-items: center;
+ width: 100%;
+}
+
+#uptimeTimer strong {
+ font-weight: bold;
+}
+
+#uptimeTimer {
+ margin-top: 0;
+}
+
+/* Metric styling by category */
+.metric-value {
+ color: var(--text-color);
+ font-weight: bold;
+ text-shadow: 0 0 6px rgba(255, 255, 255, 0.6);
+}
+
+/* Yellow color family (BTC price, sats metrics, time to payout) */
+#btc_price,
+#daily_mined_sats,
+#monthly_mined_sats,
+#estimated_earnings_per_day_sats,
+#estimated_earnings_next_block_sats,
+#estimated_rewards_in_window_sats,
+#est_time_to_payout {
+ color: #ffd700;
+ text-shadow: 0 0 6px rgba(255, 215, 0, 0.6);
+}
+
+/* Green color family (profits, earnings) */
+#unpaid_earnings,
+#daily_revenue,
+#daily_profit_usd,
+#monthly_profit_usd {
+ color: #32CD32;
+ text-shadow: 0 0 6px rgba(50, 205, 50, 0.6);
+}
+
+/* Red color family (costs) */
+#daily_power_cost {
+ color: #ff5555 !important;
+ text-shadow: 0 0 6px rgba(255, 85, 85, 0.6);
+}
+
+/* White metrics (general stats) */
+.metric-value.white,
+#block_number,
+#network_hashrate,
+#difficulty,
+#workers_hashing,
+#last_share,
+#blocks_found,
+#last_block_height {
+ color: #ffffff;
+ text-shadow: 0 0 6px rgba(255, 255, 255, 0.6);
+}
+
+/* Blue metrics (time data) */
+#last_block_time {
+ color: #00dfff;
+ text-shadow: 0 0 6px rgba(0, 223, 255, 0.6);
+}
+
+.card-body strong {
+ color: var(--primary-color);
+ margin-right: 0.25rem;
+ text-shadow: 0 0 2px var(--primary-color);
+}
+
+.card-body p {
+ margin: 0.25rem 0;
+ line-height: 1.2;
+}
+
+/* Hidden Congrats Message */
+#congratsMessage {
+ display: none;
+ position: fixed;
+ top: 20px;
+ left: 50%;
+ transform: translateX(-50%);
+ z-index: 1000;
+ background: #f7931a;
+ color: #000;
+ padding: 10px;
+ border-radius: 5px;
+ box-shadow: 0 0 15px rgba(247, 147, 26, 0.7);
+}
diff --git a/static/css/error.css b/static/css/error.css
new file mode 100644
index 0000000..44aacc1
--- /dev/null
+++ b/static/css/error.css
@@ -0,0 +1,138 @@
+:root {
+ --bg-color: #0a0a0a;
+ --bg-gradient: linear-gradient(135deg, #0a0a0a, #1a1a1a);
+ --primary-color: #f7931a;
+ --text-color: white;
+ --terminal-font: 'VT323', monospace;
+ --header-font: 'Orbitron', sans-serif;
+}
+
+/* CRT Screen Effect */
+body::before {
+ content: " ";
+ display: block;
+ position: fixed;
+ top: 0; left: 0; bottom: 0; right: 0;
+ background: linear-gradient(rgba(18, 16, 16, 0) 50%, rgba(0, 0, 0, 0.1) 50%),
+ linear-gradient(90deg, rgba(255, 0, 0, 0.03), rgba(0, 255, 0, 0.02), rgba(0, 0, 255, 0.03));
+ background-size: 100% 2px, 3px 100%;
+ pointer-events: none;
+ z-index: 2;
+ opacity: 0.15;
+}
+
+/* Flicker Animation */
+@keyframes flicker {
+ 0% { opacity: 0.97; }
+ 5% { opacity: 0.95; }
+ 10% { opacity: 0.97; }
+ 15% { opacity: 0.94; }
+ 20% { opacity: 0.98; }
+ 50% { opacity: 0.95; }
+ 80% { opacity: 0.96; }
+ 90% { opacity: 0.94; }
+ 100% { opacity: 0.98; }
+}
+
+body {
+ background: var(--bg-gradient);
+ color: var(--text-color);
+ padding-top: 50px;
+ font-family: var(--terminal-font);
+ text-shadow: 0 0 5px rgba(255, 255, 255, 0.3);
+}
+
+a.btn-primary {
+ background-color: var(--primary-color);
+ border-color: var(--primary-color);
+ color: black;
+ margin-top: 20px;
+ font-family: var(--header-font);
+ text-shadow: none;
+ box-shadow: 0 0 10px rgba(247, 147, 26, 0.5);
+ transition: all 0.3s ease;
+}
+
+a.btn-primary:hover {
+ background-color: #ffa64d;
+ box-shadow: 0 0 15px rgba(247, 147, 26, 0.7);
+}
+
+/* Enhanced error container with scanlines */
+.error-container {
+ max-width: 600px;
+ margin: 0 auto;
+ text-align: center;
+ padding: 2rem;
+ border: 1px solid var(--primary-color);
+ border-radius: 0;
+ background-color: rgba(0, 0, 0, 0.3);
+ box-shadow: 0 0 15px rgba(247, 147, 26, 0.3);
+ position: relative;
+ overflow: hidden;
+ animation: flicker 4s infinite;
+}
+
+/* Scanline effect for error container */
+.error-container::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: repeating-linear-gradient(
+ 0deg,
+ rgba(0, 0, 0, 0.1),
+ rgba(0, 0, 0, 0.1) 1px,
+ transparent 1px,
+ transparent 2px
+ );
+ pointer-events: none;
+ z-index: 1;
+}
+
+h1 {
+ color: var(--primary-color);
+ margin-bottom: 1rem;
+ font-family: var(--header-font);
+ font-weight: bold;
+ text-shadow: 0 0 10px var(--primary-color);
+ position: relative;
+ z-index: 2;
+}
+
+p {
+ margin-bottom: 1.5rem;
+ font-size: 1.5rem;
+ position: relative;
+ z-index: 2;
+ color: #ff5555;
+ text-shadow: 0 0 8px rgba(255, 85, 85, 0.6);
+}
+
+/* Cursor blink for terminal feel */
+.terminal-cursor {
+ display: inline-block;
+ width: 10px;
+ height: 20px;
+ background-color: #f7931a;
+ margin-left: 2px;
+ animation: blink 1s step-end infinite;
+ vertical-align: middle;
+ box-shadow: 0 0 5px rgba(247, 147, 26, 0.8);
+}
+
+@keyframes blink {
+ 0%, 100% { opacity: 1; }
+ 50% { opacity: 0; }
+}
+
+/* Error code styling */
+.error-code {
+ font-family: var(--terminal-font);
+ font-size: 1.2rem;
+ color: #00dfff;
+ text-shadow: 0 0 10px #00dfff, 0 0 20px #00dfff;
+ margin-bottom: 1rem;
+}
diff --git a/static/css/retro-refresh.css b/static/css/retro-refresh.css
new file mode 100644
index 0000000..9e987ff
--- /dev/null
+++ b/static/css/retro-refresh.css
@@ -0,0 +1,369 @@
+/* Retro Floating Refresh Bar Styles */
+:root {
+ --terminal-bg: #000000;
+ --terminal-border: #f7931a;
+ --terminal-text: #f7931a;
+ --terminal-glow: rgba(247, 147, 26, 0.7);
+ --terminal-width: 300px;
+}
+
+/* Adjust width for desktop */
+@media (min-width: 768px) {
+ :root {
+ --terminal-width: 340px;
+ }
+}
+
+/* Remove the existing refresh timer container styles */
+#refreshUptime {
+ visibility: hidden !important;
+ height: 0 !important;
+ overflow: hidden !important;
+ margin: 0 !important;
+ padding: 0 !important;
+}
+
+/* Add padding to the bottom of the page to prevent floating bar from covering content
+body {
+ padding-bottom: 100px !important;
+}
+*/
+/* Floating Retro Terminal Container */
+#retro-terminal-bar {
+ position: fixed;
+ bottom: 20px;
+ left: 50%;
+ transform: translateX(-50%);
+ width: var(--terminal-width);
+ background-color: var(--terminal-bg);
+ border: 2px solid var(--terminal-border);
+ /* box-shadow: 0 0 15px var(--terminal-glow); */
+ z-index: 1000;
+ font-family: 'VT323', monospace;
+ overflow: hidden;
+ padding: 5px;
+}
+
+/* Desktop positioning (bottom right) */
+@media (min-width: 768px) {
+ #retro-terminal-bar {
+ left: auto;
+ right: 20px;
+ transform: none;
+ }
+}
+
+/* Terminal header with control buttons */
+/* Update the terminal title to match card headers */
+.terminal-header {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 5px;
+ border-bottom: 1px solid var(--terminal-border);
+ padding-bottom: 3px;
+ background-color: #000; /* Match card header background */
+}
+
+.terminal-title {
+ color: var(--primary-color);
+ font-weight: bold;
+ font-size: 1.1rem; /* Match card header font size */
+ border-bottom: none;
+ text-shadow: 0 0 5px var(--primary-color);
+ animation: flicker 4s infinite; /* Add flicker animation from card headers */
+ font-family: var(--header-font); /* Use the same font variable */
+ padding: 0.3rem 0; /* Match card header padding */
+ letter-spacing: 1px;
+}
+
+/* Duplicate of the 4s flicker keyframes from common.css so this stylesheet also works standalone */
+@keyframes flicker {
+ 0% { opacity: 0.97; }
+ 5% { opacity: 0.95; }
+ 10% { opacity: 0.97; }
+ 15% { opacity: 0.94; }
+ 20% { opacity: 0.98; }
+ 50% { opacity: 0.95; }
+ 80% { opacity: 0.96; }
+ 90% { opacity: 0.94; }
+ 100% { opacity: 0.98; }
+}
+
+.terminal-controls {
+ display: flex;
+ gap: 5px;
+}
+
+.terminal-dot {
+ width: 8px;
+ height: 8px;
+ border-radius: 50%;
+ background-color: #555;
+ transition: background-color 0.3s;
+}
+
+.terminal-dot:hover {
+ background-color: #999;
+ cursor: pointer;
+}
+
+.terminal-dot.minimize:hover {
+ background-color: #ffcc00;
+}
+
+.terminal-dot.close:hover {
+ background-color: #ff3b30;
+}
+
+/* Terminal content area */
+.terminal-content {
+ position: relative;
+ color: #ffffff;
+ padding: 5px 0;
+}
+
+/* Scanline effect for authentic CRT look */
+.terminal-content::before {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: repeating-linear-gradient(
+ 0deg,
+ rgba(0, 0, 0, 0.15),
+ rgba(0, 0, 0, 0.15) 1px,
+ transparent 1px,
+ transparent 2px
+ );
+ pointer-events: none;
+ z-index: 1;
+    animation: scanline-flicker 0.15s infinite;
+}
+
+/* Distinct name so this subtle CRT flicker doesn't override the 4s flicker defined above */
+@keyframes scanline-flicker {
+ 0% { opacity: 1.0; }
+ 50% { opacity: 0.98; }
+ 100% { opacity: 1.0; }
+}
+
+/* Enhanced Progress Bar with tick marks */
+#retro-terminal-bar .bitcoin-progress-container {
+ width: 100%;
+ height: 20px;
+ background-color: #111;
+ border: 1px solid var(--terminal-border);
+ margin-bottom: 10px;
+ position: relative;
+ overflow: hidden;
+ box-shadow: inset 0 0 10px rgba(0, 0, 0, 0.8);
+}
+
+/* Tick marks on progress bar */
+.progress-ticks {
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ display: flex;
+ justify-content: space-between;
+ padding: 0 5px;
+ color: rgba(255, 255, 255, 0.6);
+ font-size: 10px;
+ pointer-events: none;
+ z-index: 3;
+}
+
+.progress-ticks span {
+ display: flex;
+ align-items: flex-end;
+ height: 100%;
+ padding-bottom: 2px;
+}
+
+.tick-mark {
+ position: absolute;
+ top: 0;
+ width: 1px;
+ height: 5px;
+ background-color: rgba(255, 255, 255, 0.4);
+}
+
+.tick-mark.major {
+ height: 8px;
+ background-color: rgba(255, 255, 255, 0.6);
+}
+
+/* The actual progress bar */
+#retro-terminal-bar #bitcoin-progress-inner {
+ height: 100%;
+ width: 0;
+ background: linear-gradient(90deg, #f7931a, #ffa500);
+ position: relative;
+ transition: width 1s linear;
+}
+
+/* Position the original inner container correctly */
+#retro-terminal-bar #refreshContainer {
+ display: block;
+ width: 100%;
+}
+
+/* Blinking scan line animation */
+.scan-line {
+ position: absolute;
+ height: 2px;
+ width: 100%;
+ background-color: rgba(255, 255, 255, 0.7);
+ animation: scan 3s linear infinite;
+ box-shadow: 0 0 8px 1px rgba(255, 255, 255, 0.5);
+ z-index: 2;
+}
+
+@keyframes scan {
+ 0% { top: -2px; }
+ 100% { top: 22px; }
+}
+
+/* Text styling */
+#retro-terminal-bar #progress-text {
+ font-size: 16px;
+ color: var(--terminal-text);
+ text-shadow: 0 0 5px var(--terminal-text);
+ margin-top: 5px;
+ text-align: center;
+ position: relative;
+ z-index: 2;
+}
+
+#retro-terminal-bar #uptimeTimer {
+ font-size: 16px;
+ color: var(--terminal-text);
+ text-shadow: 0 0 5px var(--terminal-text);
+ text-align: center;
+ position: relative;
+ z-index: 2;
+ border-top: 1px solid rgba(247, 147, 26, 0.3);
+ padding-top: 5px;
+ margin-top: 5px;
+}
+
+/* Terminal cursor */
+#retro-terminal-bar #terminal-cursor {
+ display: inline-block;
+ width: 8px;
+ height: 14px;
+ background-color: var(--terminal-text);
+ margin-left: 2px;
+ animation: blink 1s step-end infinite;
+ box-shadow: 0 0 8px var(--terminal-text);
+}
+
+/* Glowing effect during the last few seconds */
+#retro-terminal-bar #bitcoin-progress-inner.glow-effect {
+ box-shadow: 0 0 15px #f7931a, 0 0 25px #f7931a;
+}
+
+#retro-terminal-bar .waiting-for-update {
+ animation: waitingPulse 2s infinite !important;
+}
+
+@keyframes waitingPulse {
+ 0%, 100% { box-shadow: 0 0 10px #f7931a, 0 0 15px #f7931a; opacity: 0.8; }
+ 50% { box-shadow: 0 0 20px #f7931a, 0 0 35px #f7931a; opacity: 1; }
+}
+
+/* Status indicators */
+.status-indicators {
+ display: flex;
+ justify-content: space-between;
+ margin-bottom: 5px;
+ font-size: 12px;
+ color: #aaa;
+}
+
+.status-indicator {
+ display: flex;
+ align-items: center;
+}
+
+.status-dot {
+ width: 6px;
+ height: 6px;
+ border-radius: 50%;
+ margin-right: 4px;
+}
+
+.status-dot.connected {
+ background-color: #32CD32;
+ box-shadow: 0 0 5px #32CD32;
+ animation: pulse 2s infinite;
+}
+
+@keyframes pulse {
+ 0% { opacity: 0.8; }
+ 50% { opacity: 1; }
+ 100% { opacity: 0.8; }
+}
+
+/* Collapse/expand functionality */
+#retro-terminal-bar.collapsed .terminal-content {
+ display: none;
+}
+
+#retro-terminal-bar.collapsed {
+ width: 180px;
+}
+
+/* On desktop, move the collapsed bar to bottom right */
+@media (min-width: 768px) {
+ #retro-terminal-bar.collapsed {
+ right: 20px;
+ transform: none;
+ }
+}
+
+/* Show button */
+#show-terminal-button {
+ position: fixed;
+ bottom: 10px;
+ right: 10px;
+ z-index: 1000;
+ background-color: #f7931a;
+ color: #000;
+ border: none;
+ padding: 8px 12px;
+ cursor: pointer;
+ font-family: 'VT323', monospace;
+ font-size: 14px;
+ box-shadow: 0 0 10px rgba(247, 147, 26, 0.5);
+}
+
+#show-terminal-button:hover {
+ background-color: #ffaa33;
+}
+
+/* Mobile responsiveness */
+@media (max-width: 576px) {
+ #retro-terminal-bar {
+ width: 280px;
+ bottom: 10px;
+ }
+
+ .terminal-title {
+ font-size: 14px;
+ }
+
+ .terminal-dot {
+ width: 6px;
+ height: 6px;
+ }
+
+ #show-terminal-button {
+ padding: 6px 10px;
+ font-size: 12px;
+ }
+}
\ No newline at end of file
diff --git a/static/css/workers.css b/static/css/workers.css
new file mode 100644
index 0000000..61d7b2f
--- /dev/null
+++ b/static/css/workers.css
@@ -0,0 +1,333 @@
+/* Styles specific to the workers page */
+
+/* Search and filter controls */
+.controls-bar {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ margin-bottom: 15px;
+ flex-wrap: wrap;
+ gap: 10px;
+}
+
+.search-box {
+ background-color: var(--bg-color);
+ border: 1px solid var(--primary-color);
+ color: var(--text-color);
+ padding: 5px 10px;
+ font-family: var(--terminal-font);
+ min-width: 200px;
+}
+
+.search-box:focus {
+ outline: none;
+ box-shadow: 0 0 8px rgba(247, 147, 26, 0.5);
+}
+
+.filter-button {
+ background-color: var(--bg-color);
+ border: 1px solid var(--primary-color);
+ color: var(--primary-color);
+ padding: 5px 10px;
+ font-family: var(--terminal-font);
+ cursor: pointer;
+}
+
+.filter-button.active {
+ background-color: var(--primary-color);
+ color: var(--bg-color);
+}
+
+/* Worker grid for worker cards */
+.worker-grid {
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(250px, 1fr));
+ gap: 10px;
+ margin-top: 10px;
+}
+
+/* Worker card styles */
+.worker-card {
+ background-color: var(--bg-color);
+ border: 1px solid var(--primary-color);
+ box-shadow: 0 0 5px rgba(247, 147, 26, 0.3);
+ position: relative;
+ overflow: hidden;
+ padding: 10px;
+ height: 100%;
+ animation: fadeIn 0.3s ease;
+}
+
+@keyframes fadeIn {
+ from { opacity: 0; }
+ to { opacity: 1; }
+}
+
+.worker-card::after {
+ content: '';
+ position: absolute;
+ top: 0;
+ left: 0;
+ right: 0;
+ bottom: 0;
+ background: repeating-linear-gradient(
+ 0deg,
+ rgba(0, 0, 0, 0.05),
+ rgba(0, 0, 0, 0.05) 1px,
+ transparent 1px,
+ transparent 2px
+ );
+ pointer-events: none;
+ z-index: 1;
+}
+
+.worker-card-online {
+ border-color: #32CD32;
+ box-shadow: 0 0 8px rgba(50, 205, 50, 0.4);
+}
+
+.worker-card-offline {
+ border-color: #ff5555;
+ box-shadow: 0 0 8px rgba(255, 85, 85, 0.4);
+}
+
+.worker-name {
+ color: var(--primary-color);
+ font-weight: bold;
+ font-size: 1.2rem;
+ text-shadow: 0 0 5px var(--primary-color);
+ margin-bottom: 5px;
+ white-space: nowrap;
+ overflow: hidden;
+ text-overflow: ellipsis;
+ z-index: 2;
+ position: relative;
+}
+
+.worker-stats {
+ margin-top: 8px;
+ font-size: 0.9rem;
+ z-index: 2;
+ position: relative;
+}
+
+.worker-stats-row {
+ display: flex;
+ justify-content: space-between;
+ margin-bottom: 4px;
+}
+
+.worker-stats-label {
+ color: #aaa;
+}
+
+.hashrate-bar {
+ height: 4px;
+ background: linear-gradient(90deg, #1137F5, #39ff14);
+ margin-top: 4px;
+ margin-bottom: 8px;
+ position: relative;
+ z-index: 2;
+}
+
+/* Worker badge */
+.worker-type {
+ position: absolute;
+ top: 10px;
+ right: 10px;
+ font-size: 0.7rem;
+ background-color: rgba(0, 0, 0, 0.6);
+ border: 1px solid var(--primary-color);
+ color: var(--primary-color);
+ padding: 1px 5px;
+ z-index: 2;
+}
+
+/* Status badges */
+.status-badge {
+ display: inline-block;
+ font-size: 0.8rem;
+ padding: 2px 8px;
+ border-radius: 3px;
+ z-index: 2;
+ position: relative;
+}
+
+.status-badge-online {
+ background-color: rgba(50, 205, 50, 0.2);
+ border: 1px solid #32CD32;
+ color: #32CD32;
+ text-shadow: 0 0 5px rgba(50, 205, 50, 0.8);
+}
+
+.status-badge-offline {
+ background-color: rgba(255, 85, 85, 0.2);
+ border: 1px solid #ff5555;
+ color: #ff5555;
+ text-shadow: 0 0 5px rgba(255, 85, 85, 0.8);
+}
+
+/* Stats bars */
+.stats-bar-container {
+ width: 100%;
+ height: 4px;
+ background-color: rgba(255, 255, 255, 0.1);
+ margin-top: 2px;
+ margin-bottom: 5px;
+ position: relative;
+ z-index: 2;
+}
+
+.stats-bar {
+ height: 100%;
+ background: linear-gradient(90deg, #1137F5, #39ff14);
+}
+
+/* Summary stats in the header */
+.summary-stats {
+ display: flex;
+ flex-wrap: wrap;
+ justify-content: space-around;
+ gap: 15px;
+ margin: 15px 0;
+}
+
+.summary-stat {
+ text-align: center;
+ min-width: 120px;
+}
+
+.summary-stat-value {
+ font-size: 1.6rem;
+ font-weight: bold;
+ margin-bottom: 5px;
+}
+
+.summary-stat-label {
+ font-size: 0.9rem;
+ color: #aaa;
+}
+
+/* Worker count ring */
+.worker-ring {
+ width: 90px;
+ height: 90px;
+ border-radius: 50%;
+ position: relative;
+ margin: 0 auto;
+ background: conic-gradient(
+ #32CD32 0% calc(var(--online-percent) * 100%),
+ #ff5555 calc(var(--online-percent) * 100%) 100%
+ );
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ box-shadow: 0 0 15px rgba(247, 147, 26, 0.3);
+}
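+
+/* Note: --online-percent (a 0-1 fraction) is expected to be supplied by the page,
+   e.g. via an inline style like style="--online-percent: 0.8" set from JS (illustrative). */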
+
+.worker-ring-inner {
+ width: 70px;
+ height: 70px;
+ border-radius: 50%;
+ background-color: var(--bg-color);
+ display: flex;
+ align-items: center;
+ justify-content: center;
+ font-size: 1.2rem;
+ font-weight: bold;
+ color: var(--text-color);
+}
+
+/* Mini hashrate chart */
+.mini-chart {
+ height: 40px;
+ width: 100%;
+ margin-top: 5px;
+ position: relative;
+ z-index: 2;
+}
+
+.loading-fade {
+ opacity: 0.6;
+ transition: opacity 0.3s ease;
+}
+
+/* Mobile responsiveness */
+@media (max-width: 576px) {
+ .controls-bar {
+ flex-direction: column;
+ align-items: stretch;
+ }
+
+ .search-box {
+ width: 100%;
+ }
+
+ .filter-buttons {
+ display: flex;
+ justify-content: space-between;
+ }
+
+ .worker-grid {
+ grid-template-columns: 1fr;
+ }
+
+ .summary-stats {
+ flex-direction: column;
+ align-items: center;
+ }
+
+ .summary-stat {
+ width: 100%;
+ }
+}
+
+@media (max-width: 768px) {
+ /* Fix for "Made by" link collision with title */
+ #topRightLink {
+ position: static !important;
+ display: block !important;
+ text-align: right !important;
+ margin-bottom: 0.5rem !important;
+ margin-top: 0 !important;
+ font-size: 0.8rem !important;
+ }
+
+ /* Adjust heading for better mobile display */
+ h1 {
+ font-size: 20px !important;
+ line-height: 1.2 !important;
+ margin-top: 0.5rem !important;
+ padding-top: 0 !important;
+ }
+
+ /* Improve container padding for mobile */
+ .container-fluid {
+ padding-left: 0.5rem !important;
+ padding-right: 0.5rem !important;
+ }
+
+ /* Ensure top section has appropriate spacing */
+ .row.mb-3 {
+ margin-top: 0.5rem !important;
+ }
+}
+
+/* Add a more aggressive breakpoint for very small screens */
+@media (max-width: 380px) {
+ #topRightLink {
+ margin-bottom: 0.75rem !important;
+ font-size: 0.7rem !important;
+ }
+
+ h1 {
+ font-size: 18px !important;
+ margin-bottom: 0.5rem !important;
+ }
+
+ /* Further reduce container padding */
+ .container-fluid {
+ padding-left: 0.3rem !important;
+ padding-right: 0.3rem !important;
+ }
+}
diff --git a/static/js/main.js b/static/js/main.js
new file mode 100644
index 0000000..7b7ead6
--- /dev/null
+++ b/static/js/main.js
@@ -0,0 +1,801 @@
+"use strict";
+
+// Global variables
+let previousMetrics = {};
+let persistentArrows = {};
+let serverTimeOffset = 0;
+let serverStartTime = null;
+let latestMetrics = null;
+let initialLoad = true;
+let trendData = [];
+let trendLabels = [];
+let trendChart = null;
+let connectionRetryCount = 0;
+let maxRetryCount = 10;
+let reconnectionDelay = 1000; // Start with 1 second
+let pingInterval = null;
+let lastPingTime = Date.now();
+let connectionLostTimeout = null;
+
+// Bitcoin-themed progress bar functionality
+let progressInterval;
+let currentProgress = 0;
+let lastUpdateTime = Date.now();
+let expectedUpdateInterval = 60000; // Expected server update interval (60 seconds)
+const PROGRESS_MAX = 60; // 60 seconds for a complete cycle
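+// e.g. 45s after the last server update the bar shows (45 + 1) / 60 ≈ 77% width
+// (the +1s offset is applied in initProgressBar below)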
+
+// Initialize the progress bar and start the animation
+function initProgressBar() {
+ // Clear any existing interval
+ if (progressInterval) {
+ clearInterval(progressInterval);
+ }
+
+ // Set last update time to now
+ lastUpdateTime = Date.now();
+
+ // Reset progress with initial offset
+ currentProgress = 1; // Start at 1 instead of 0 for offset
+ updateProgressBar(currentProgress);
+
+ // Start the interval
+ progressInterval = setInterval(function() {
+ // Calculate elapsed time since last update
+ const elapsedTime = Date.now() - lastUpdateTime;
+
+ // Calculate progress percentage based on elapsed time with +1 second offset
+ const secondsElapsed = Math.floor(elapsedTime / 1000) + 1; // Add 1 second offset
+
+ // If we've gone past the expected update time
+ if (secondsElapsed >= PROGRESS_MAX) {
+ // Keep the progress bar full but show waiting state
+ currentProgress = PROGRESS_MAX;
+ } else {
+ // Normal progress with offset
+ currentProgress = secondsElapsed;
+ }
+
+ updateProgressBar(currentProgress);
+ }, 1000);
+}
+
+// Update the progress bar display
+function updateProgressBar(seconds) {
+ const progressPercent = (seconds / PROGRESS_MAX) * 100;
+ $("#bitcoin-progress-inner").css("width", progressPercent + "%");
+
+ // Add glowing effect when close to completion
+ if (progressPercent > 80) {
+ $("#bitcoin-progress-inner").addClass("glow-effect");
+ } else {
+ $("#bitcoin-progress-inner").removeClass("glow-effect");
+ }
+
+ // Update remaining seconds text - more precise calculation
+ let remainingSeconds = PROGRESS_MAX - seconds;
+
+ // When we're past the expected time, show "Waiting for update..."
+ if (remainingSeconds <= 0) {
+ $("#progress-text").text("Waiting for update...");
+ $("#bitcoin-progress-inner").addClass("waiting-for-update");
+ } else {
+ $("#progress-text").text(remainingSeconds + "s to next update");
+ $("#bitcoin-progress-inner").removeClass("waiting-for-update");
+ }
+}
+
+// Register the Chart.js annotation plugin only if both Chart.js and the plugin are loaded
+if (window.Chart && window['chartjs-plugin-annotation']) {
+    Chart.register(window['chartjs-plugin-annotation']);
+}
+
+// SSE Connection with Error Handling and Reconnection Logic
+function setupEventSource() {
+ console.log("Setting up EventSource connection...");
+
+ if (window.eventSource) {
+ console.log("Closing existing EventSource connection");
+ window.eventSource.close();
+ window.eventSource = null;
+ }
+
+ // Always use absolute URL with origin to ensure it works from any path
+ const baseUrl = window.location.origin;
+ const streamUrl = `${baseUrl}/stream`;
+
+ console.log("Current path:", window.location.pathname);
+ console.log("Using stream URL:", streamUrl);
+
+ // Clear any existing ping interval
+ if (pingInterval) {
+ clearInterval(pingInterval);
+ pingInterval = null;
+ }
+
+ // Clear any connection lost timeout
+ if (connectionLostTimeout) {
+ clearTimeout(connectionLostTimeout);
+ connectionLostTimeout = null;
+ }
+
+ try {
+ const eventSource = new EventSource(streamUrl);
+
+ eventSource.onopen = function(e) {
+ console.log("EventSource connection opened successfully");
+ connectionRetryCount = 0; // Reset retry count on successful connection
+ reconnectionDelay = 1000; // Reset reconnection delay
+ hideConnectionIssue();
+
+ // Start ping interval to detect dead connections
+ lastPingTime = Date.now();
+ pingInterval = setInterval(function() {
+ const now = Date.now();
+ if (now - lastPingTime > 60000) { // 60 seconds without data
+ console.warn("No data received for 60 seconds, reconnecting...");
+ showConnectionIssue("Connection stalled");
+ eventSource.close();
+ setupEventSource();
+ }
+ }, 10000); // Check every 10 seconds
+ };
+
+ eventSource.onmessage = function(e) {
+ console.log("SSE message received");
+ lastPingTime = Date.now(); // Update ping time on any message
+
+ try {
+ const data = JSON.parse(e.data);
+
+ // Handle different message types
+ if (data.type === "ping") {
+ console.log("Ping received:", data);
+ // Update connection count if available
+ if (data.connections !== undefined) {
+ console.log(`Active connections: ${data.connections}`);
+ }
+ return;
+ }
+
+ if (data.type === "timeout_warning") {
+ console.log(`Connection timeout warning: ${data.remaining}s remaining`);
+ // If less than 30 seconds remaining, prepare for reconnection
+ if (data.remaining < 30) {
+ console.log("Preparing for reconnection due to upcoming timeout");
+ }
+ return;
+ }
+
+ if (data.type === "timeout") {
+ console.log("Connection timeout from server:", data.message);
+ eventSource.close();
+ // If reconnect flag is true, reconnect immediately
+ if (data.reconnect) {
+ console.log("Server requested reconnection");
+ setTimeout(setupEventSource, 500);
+ } else {
+ setupEventSource();
+ }
+ return;
+ }
+
+ if (data.error) {
+ console.error("Server reported error:", data.error);
+ showConnectionIssue(data.error);
+
+ // If retry time provided, use it, otherwise use default
+ const retryTime = data.retry || 5000;
+ setTimeout(function() {
+ manualRefresh();
+ }, retryTime);
+ return;
+ }
+
+ // Process regular data update
+ latestMetrics = data;
+ updateUI();
+ hideConnectionIssue();
+
+ // Also explicitly trigger a data refresh event
+ $(document).trigger('dataRefreshed');
+ } catch (err) {
+ console.error("Error processing SSE data:", err);
+ showConnectionIssue("Data processing error");
+ }
+ };
+
+ eventSource.onerror = function(e) {
+ console.error("SSE connection error", e);
+ showConnectionIssue("Connection lost");
+
+ eventSource.close();
+
+ // Implement exponential backoff for reconnection
+ connectionRetryCount++;
+
+ if (connectionRetryCount > maxRetryCount) {
+ console.log("Maximum retry attempts reached, switching to polling mode");
+ if (pingInterval) {
+ clearInterval(pingInterval);
+ pingInterval = null;
+ }
+
+ // Switch to regular polling
+ showConnectionIssue("Using polling mode");
+ setInterval(manualRefresh, 30000); // Poll every 30 seconds
+ manualRefresh(); // Do an immediate refresh
+ return;
+ }
+
+ // Exponential backoff with jitter
+ const jitter = Math.random() * 0.3 + 0.85; // 0.85-1.15
+ reconnectionDelay = Math.min(30000, reconnectionDelay * 1.5 * jitter);
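+
+            // Delay grows roughly 1.5x per attempt (ignoring jitter):
+            // 1.5s -> 2.25s -> 3.4s -> ... capped at 30s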
+
+ console.log(`Reconnecting in ${(reconnectionDelay/1000).toFixed(1)} seconds... (attempt ${connectionRetryCount}/${maxRetryCount})`);
+ setTimeout(setupEventSource, reconnectionDelay);
+ };
+
+ window.eventSource = eventSource;
+ console.log("EventSource setup complete");
+
+ // Set a timeout to detect if connection is established
+ connectionLostTimeout = setTimeout(function() {
+ if (eventSource.readyState !== 1) { // 1 = OPEN
+ console.warn("Connection not established within timeout, switching to manual refresh");
+ showConnectionIssue("Connection timeout");
+ eventSource.close();
+ manualRefresh();
+ }
+ }, 10000); // 10 seconds timeout to establish connection
+
+ } catch (error) {
+ console.error("Failed to create EventSource:", error);
+ showConnectionIssue("Connection setup failed");
+ setTimeout(setupEventSource, 5000); // Try again in 5 seconds
+ }
+
+ // Add page visibility change listener
+ // This helps reconnect when user returns to the tab after it's been inactive
+ document.removeEventListener("visibilitychange", handleVisibilityChange);
+ document.addEventListener("visibilitychange", handleVisibilityChange);
+}
+
+// Handle page visibility changes
+function handleVisibilityChange() {
+ if (!document.hidden) {
+ console.log("Page became visible, checking connection");
+ if (!window.eventSource || window.eventSource.readyState !== 1) {
+ console.log("Connection not active, reestablishing");
+ setupEventSource();
+ }
+ manualRefresh(); // Always refresh data when page becomes visible
+ }
+}
+
+// Helper function to show connection issues to the user
+function showConnectionIssue(message) {
+ let $connectionStatus = $("#connectionStatus");
+ if (!$connectionStatus.length) {
+ $("body").append('
');
+ $connectionStatus = $("#connectionStatus");
+ }
+ $connectionStatus.html(message).show();
+
+ // Show manual refresh button when there are connection issues
+ $("#refreshButton").show();
+}
+
+// Helper function to hide connection issue message
+function hideConnectionIssue() {
+ $("#connectionStatus").hide();
+ $("#refreshButton").hide();
+}
+
+// Improved manual refresh function as fallback
+function manualRefresh() {
+ console.log("Manually refreshing data...");
+
+ $.ajax({
+ url: '/api/metrics',
+ method: 'GET',
+ dataType: 'json',
+ timeout: 15000, // 15 second timeout
+ success: function(data) {
+ console.log("Manual refresh successful");
+ lastPingTime = Date.now(); // Update ping time
+ latestMetrics = data;
+ updateUI();
+ hideConnectionIssue();
+
+ // Explicitly trigger data refresh event
+ $(document).trigger('dataRefreshed');
+ },
+ error: function(xhr, status, error) {
+ console.error("Manual refresh failed:", error);
+ showConnectionIssue("Manual refresh failed");
+
+ // Try again with exponential backoff
+ const retryDelay = Math.min(30000, 1000 * Math.pow(1.5, Math.min(5, connectionRetryCount)));
+ connectionRetryCount++;
+ setTimeout(manualRefresh, retryDelay);
+ }
+ });
+}
+
+// Initialize Chart.js with Error Handling
+function initializeChart() {
+ try {
+ const canvas = document.getElementById('trendGraph');
+ if (!canvas) {
+ console.error("Could not find trend graph canvas");
+ return null;
+ }
+ const ctx = canvas.getContext('2d');
+
+ if (!window.Chart) {
+ console.error("Chart.js not loaded");
+ return null;
+ }
+
+ // Check if Chart.js plugin is available
+ const hasAnnotationPlugin = window['chartjs-plugin-annotation'] !== undefined;
+
+ return new Chart(ctx, {
+ type: 'line',
+ data: {
+ labels: [],
+ datasets: [{
+ label: '60s Hashrate Trend (TH/s)',
+ data: [],
+ borderColor: '#f7931a',
+ backgroundColor: 'rgba(247,147,26,0.1)',
+ fill: true,
+ tension: 0.2,
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ animation: {
+ duration: 0 // Disable animations for better performance
+ },
+ scales: {
+ x: { display: false },
+ y: {
+ ticks: { color: 'white' },
+ grid: { color: '#333' }
+ }
+ },
+ plugins: {
+ legend: { display: false },
+ annotation: hasAnnotationPlugin ? {
+ annotations: {
+ averageLine: {
+ type: 'line',
+ yMin: 0,
+ yMax: 0,
+ borderColor: '#f7931a',
+ borderWidth: 2,
+ borderDash: [6, 6],
+ label: {
+ enabled: true,
+ content: '24hr Avg: 0 TH/s',
+ backgroundColor: 'rgba(0,0,0,0.7)',
+ color: '#f7931a',
+ font: { weight: 'bold', size: 13 },
+ position: 'start'
+ }
+ }
+ }
+ } : {}
+ }
+ }
+ });
+ } catch (error) {
+ console.error("Error initializing chart:", error);
+ return null;
+ }
+}
+
+// Helper function to safely format numbers with commas
+function numberWithCommas(x) {
+ if (x == null) return "N/A";
+ return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
+}
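+
+// Example: numberWithCommas(1234567) → "1,234,567" and
+// numberWithCommas(97543.21) → "97,543.21". The regex would also insert
+// commas into fractional parts longer than three digits, so callers pass
+// values rounded to at most two decimals.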
+
+// Server time update via polling
+function updateServerTime() {
+ $.ajax({
+ url: "/api/time",
+ method: "GET",
+ timeout: 5000,
+ success: function(data) {
+ serverTimeOffset = new Date(data.server_timestamp).getTime() - Date.now();
+ serverStartTime = new Date(data.server_start_time).getTime();
+ },
+ error: function(jqXHR, textStatus, errorThrown) {
+ console.error("Error fetching server time:", textStatus, errorThrown);
+ }
+ });
+}
+
+// Update uptime display
+function updateUptime() {
+ if (serverStartTime) {
+ const currentServerTime = Date.now() + serverTimeOffset;
+ const diff = currentServerTime - serverStartTime;
+ const hours = Math.floor(diff / (1000 * 60 * 60));
+ const minutes = Math.floor((diff % (1000 * 60 * 60)) / (1000 * 60));
+ const seconds = Math.floor((diff % (1000 * 60)) / 1000);
+ $("#uptimeTimer").html("Uptime: " + hours + "h " + minutes + "m " + seconds + "s");
+ }
+}
+
+// Update UI indicators (arrows)
+function updateIndicators(newMetrics) {
+ const keys = [
+ "pool_total_hashrate", "hashrate_24hr", "hashrate_3hr", "hashrate_10min",
+ "hashrate_60sec", "block_number", "btc_price", "network_hashrate",
+ "difficulty", "daily_revenue", "daily_power_cost", "daily_profit_usd",
+ "monthly_profit_usd", "daily_mined_sats", "monthly_mined_sats", "unpaid_earnings",
+ "estimated_earnings_per_day_sats", "estimated_earnings_next_block_sats", "estimated_rewards_in_window_sats",
+ "workers_hashing"
+ ];
+
+ keys.forEach(function(key) {
+ const newVal = parseFloat(newMetrics[key]);
+ if (isNaN(newVal)) return;
+
+ const oldVal = parseFloat(previousMetrics[key]);
+ if (!isNaN(oldVal)) {
+ if (newVal > oldVal) {
+ persistentArrows[key] = "↑";
+ } else if (newVal < oldVal) {
+ persistentArrows[key] = "↓";
+ }
+ } else {
+ if (newMetrics.arrow_history && newMetrics.arrow_history[key] && newMetrics.arrow_history[key].length > 0) {
+ const historyArr = newMetrics.arrow_history[key];
+ for (let i = historyArr.length - 1; i >= 0; i--) {
+ if (historyArr[i].arrow !== "") {
+ if (historyArr[i].arrow === "↑") {
+ persistentArrows[key] = "↑";
+ } else if (historyArr[i].arrow === "↓") {
+ persistentArrows[key] = "↓";
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ const indicator = document.getElementById("indicator_" + key);
+ if (indicator) {
+ indicator.innerHTML = persistentArrows[key] || "";
+ }
+ });
+
+ previousMetrics = { ...newMetrics };
+}
+
+// Helper function to safely update element text content
+function updateElementText(elementId, text) {
+ const element = document.getElementById(elementId);
+ if (element) {
+ element.textContent = text;
+ }
+}
+
+// Helper function to safely update element HTML content
+function updateElementHTML(elementId, html) {
+ const element = document.getElementById(elementId);
+ if (element) {
+ element.innerHTML = html;
+ }
+}
+
+// Update workers_hashing value from metrics, but don't try to access worker details
+function updateWorkersCount() {
+ if (latestMetrics && latestMetrics.workers_hashing !== undefined) {
+ $("#workers_hashing").text(latestMetrics.workers_hashing || 0);
+
+ // Update miner status with online/offline indicator based on worker count
+ if (latestMetrics.workers_hashing > 0) {
+ updateElementHTML("miner_status", "ONLINE");
+ } else {
+ updateElementHTML("miner_status", "OFFLINE");
+ }
+ }
+}
+
+// Check for block updates and show congratulatory messages
+function checkForBlockUpdates(data) {
+ if (previousMetrics.last_block_height !== undefined &&
+ data.last_block_height !== previousMetrics.last_block_height) {
+ showCongrats("Congrats! New Block Found: " + data.last_block_height);
+ }
+
+ if (previousMetrics.blocks_found !== undefined &&
+ data.blocks_found !== previousMetrics.blocks_found) {
+ showCongrats("Congrats! Blocks Found updated: " + data.blocks_found);
+ }
+}
+
+// Helper function to show congratulatory messages
+function showCongrats(message) {
+ const $congrats = $("#congratsMessage");
+ $congrats.text(message).fadeIn(500, function() {
+ setTimeout(function() {
+ $congrats.fadeOut(500);
+ }, 3000);
+ });
+}
+
+// Main UI update function
+function updateUI() {
+ if (!latestMetrics) {
+ console.warn("No metrics data available");
+ return;
+ }
+
+ try {
+ const data = latestMetrics;
+
+ // If there's execution time data, log it
+ if (data.execution_time) {
+ console.log(`Server metrics fetch took ${data.execution_time.toFixed(2)}s`);
+ }
+
+ // Cache jQuery selectors for performance and use safe update methods
+ updateElementText("pool_total_hashrate",
+ (data.pool_total_hashrate != null ? data.pool_total_hashrate : "N/A") + " " +
+ (data.pool_total_hashrate_unit ? data.pool_total_hashrate_unit.slice(0,-2).toUpperCase() + data.pool_total_hashrate_unit.slice(-2) : "")
+ );
+
+ updateElementText("hashrate_24hr",
+ (data.hashrate_24hr != null ? data.hashrate_24hr : "N/A") + " " +
+ (data.hashrate_24hr_unit ? data.hashrate_24hr_unit.slice(0,-2).toUpperCase() + data.hashrate_24hr_unit.slice(-2) : "")
+ );
+
+ updateElementText("hashrate_3hr",
+ (data.hashrate_3hr != null ? data.hashrate_3hr : "N/A") + " " +
+ (data.hashrate_3hr_unit ? data.hashrate_3hr_unit.slice(0,-2).toUpperCase() + data.hashrate_3hr_unit.slice(-2) : "")
+ );
+
+ updateElementText("hashrate_10min",
+ (data.hashrate_10min != null ? data.hashrate_10min : "N/A") + " " +
+ (data.hashrate_10min_unit ? data.hashrate_10min_unit.slice(0,-2).toUpperCase() + data.hashrate_10min_unit.slice(-2) : "")
+ );
+
+ updateElementText("hashrate_60sec",
+ (data.hashrate_60sec != null ? data.hashrate_60sec : "N/A") + " " +
+ (data.hashrate_60sec_unit ? data.hashrate_60sec_unit.slice(0,-2).toUpperCase() + data.hashrate_60sec_unit.slice(-2) : "")
+ );
+
+ updateElementText("block_number", numberWithCommas(data.block_number));
+
+ updateElementText("btc_price",
+ data.btc_price != null ? "$" + numberWithCommas(parseFloat(data.btc_price).toFixed(2)) : "N/A"
+ );
+
+ updateElementText("network_hashrate", numberWithCommas(Math.round(data.network_hashrate)) + " EH/s");
+ updateElementText("difficulty", numberWithCommas(Math.round(data.difficulty)));
+ updateElementText("daily_revenue", "$" + numberWithCommas(data.daily_revenue.toFixed(2)));
+ updateElementText("daily_power_cost", "$" + numberWithCommas(data.daily_power_cost.toFixed(2)));
+ updateElementText("daily_profit_usd", "$" + numberWithCommas(data.daily_profit_usd.toFixed(2)));
+ updateElementText("monthly_profit_usd", "$" + numberWithCommas(data.monthly_profit_usd.toFixed(2)));
+ updateElementText("daily_mined_sats", numberWithCommas(data.daily_mined_sats) + " sats");
+ updateElementText("monthly_mined_sats", numberWithCommas(data.monthly_mined_sats) + " sats");
+
+ // Update worker count from metrics (just the number, not full worker data)
+ updateWorkersCount();
+
+ updateElementText("unpaid_earnings", data.unpaid_earnings + " BTC");
+
+ // Update payout estimation with color coding
+ const payoutText = data.est_time_to_payout;
+ updateElementText("est_time_to_payout", payoutText);
+
+ if (payoutText && payoutText.toLowerCase().includes("next block")) {
+ $("#est_time_to_payout").css({
+ "color": "#32CD32",
+ "animation": "glowPulse 1s infinite"
+ });
+ } else {
+ const days = parseFloat(payoutText);
+ if (!isNaN(days)) {
+ if (days < 4) {
+ $("#est_time_to_payout").css({"color": "#32CD32", "animation": "none"});
+ } else if (days > 20) {
+ $("#est_time_to_payout").css({"color": "red", "animation": "none"});
+ } else {
+ $("#est_time_to_payout").css({"color": "#ffd700", "animation": "none"});
+ }
+ } else {
+ $("#est_time_to_payout").css({"color": "#ffd700", "animation": "none"});
+ }
+ }
+
+ updateElementText("last_block_height", data.last_block_height || "");
+ updateElementText("last_block_time", data.last_block_time || "");
+ updateElementText("blocks_found", data.blocks_found || "0");
+ updateElementText("last_share", data.total_last_share || "");
+
+ // Update Estimated Earnings metrics
+ updateElementText("estimated_earnings_per_day_sats", numberWithCommas(data.estimated_earnings_per_day_sats) + " sats");
+ updateElementText("estimated_earnings_next_block_sats", numberWithCommas(data.estimated_earnings_next_block_sats) + " sats");
+ updateElementText("estimated_rewards_in_window_sats", numberWithCommas(data.estimated_rewards_in_window_sats) + " sats");
+
+ // Update last updated timestamp
+ const now = new Date(Date.now() + serverTimeOffset);
+ updateElementHTML("lastUpdated", "Last Updated: " + now.toLocaleString() + " ");
+
+ // Update chart if it exists
+ if (trendChart) {
+ try {
+ // Always update the 24hr average line even if we don't have data points yet
+ const avg24hr = parseFloat(data.hashrate_24hr || 0);
+ if (!isNaN(avg24hr) &&
+ trendChart.options.plugins.annotation &&
+ trendChart.options.plugins.annotation.annotations &&
+ trendChart.options.plugins.annotation.annotations.averageLine) {
+ const annotation = trendChart.options.plugins.annotation.annotations.averageLine;
+ annotation.yMin = avg24hr;
+ annotation.yMax = avg24hr;
+ annotation.label.content = '24hr Avg: ' + avg24hr + ' TH/s';
+ }
+
+ // Update data points if we have any (removed minimum length requirement)
+ if (data.arrow_history && data.arrow_history.hashrate_60sec) {
+ const historyData = data.arrow_history.hashrate_60sec;
+ if (historyData && historyData.length > 0) {
+ console.log(`Updating chart with ${historyData.length} data points`);
+ trendChart.data.labels = historyData.map(item => item.time);
+ trendChart.data.datasets[0].data = historyData.map(item => {
+ const val = parseFloat(item.value);
+ return isNaN(val) ? 0 : val;
+ });
+ } else {
+ console.log("No history data points available yet");
+ }
+ } else {
+ console.log("No hashrate_60sec history available yet");
+
+ // If there's no history data, create a starting point using current hashrate
+ if (data.hashrate_60sec) {
+ const currentTime = new Date().toLocaleTimeString('en-US', {hour12: false, hour: '2-digit', minute: '2-digit'});
+ trendChart.data.labels = [currentTime];
+ trendChart.data.datasets[0].data = [parseFloat(data.hashrate_60sec) || 0];
+ console.log("Created initial data point with current hashrate");
+ }
+ }
+
+ // Always update the chart, even if we just updated the average line
+ trendChart.update('none');
+ } catch (chartError) {
+ console.error("Error updating chart:", chartError);
+ }
+ }
+
+ // Update indicators and check for block updates
+ updateIndicators(data);
+ checkForBlockUpdates(data);
+
+ } catch (error) {
+ console.error("Error updating UI:", error);
+ }
+}
+
+// Set up refresh synchronization
+function setupRefreshSync() {
+ // Listen for the dataRefreshed event
+ $(document).on('dataRefreshed', function() {
+ // Broadcast to any other open tabs/pages that the data has been refreshed
+ try {
+ // Store the current timestamp to localStorage
+ localStorage.setItem('dashboardRefreshTime', Date.now().toString());
+
+ // Create a custom event that can be detected across tabs/pages
+ localStorage.setItem('dashboardRefreshEvent', 'refresh-' + Date.now());
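+
+ // 'storage' events fire only in *other* tabs/windows, which is why a fresh
+ // timestamped value is written each time - workers.js listens for changes
+ // to 'dashboardRefreshEvent' and syncs its own refresh cycle.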
+
+ console.log("Dashboard refresh synchronized");
+ } catch (e) {
+ console.error("Error synchronizing refresh:", e);
+ }
+ });
+}
+
+// Document ready initialization
+$(document).ready(function() {
+ // Initialize the chart
+ trendChart = initializeChart();
+
+ // Initialize the progress bar
+ initProgressBar();
+
+ // Set up direct monitoring of data refreshes
+ $(document).on('dataRefreshed', function() {
+ console.log("Data refresh event detected, resetting progress bar");
+ lastUpdateTime = Date.now();
+ currentProgress = 0;
+ updateProgressBar(currentProgress);
+ });
+
+ // Wrap the updateUI function to detect changes and trigger events
+ const originalUpdateUI = updateUI;
+ updateUI = function() {
+ const previousMetricsTimestamp = latestMetrics ? latestMetrics.server_timestamp : null;
+
+ // Call the original function
+ originalUpdateUI.apply(this, arguments);
+
+ // Check if we got new data by comparing timestamps
+ if (latestMetrics && latestMetrics.server_timestamp !== previousMetricsTimestamp) {
+ console.log("New data detected, triggering refresh event");
+ $(document).trigger('dataRefreshed');
+ }
+ };
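+
+ // Note: the SSE handler and manualRefresh() already trigger 'dataRefreshed'
+ // directly, so this wrapper rarely fires; it only detects a timestamp change
+ // if latestMetrics is replaced while the original updateUI is running.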
+
+ // Set up event source for SSE
+ setupEventSource();
+
+ // Start server time polling
+ updateServerTime();
+ setInterval(updateServerTime, 30000);
+
+ // Start uptime timer
+ setInterval(updateUptime, 1000);
+ updateUptime();
+
+ // Set up refresh synchronization with workers page
+ setupRefreshSync();
+
+ // Add a manual refresh button for fallback
+ $("body").append('Refresh Data ');
+
+ $("#refreshButton").on("click", function() {
+ $(this).text("Refreshing...");
+ $(this).prop("disabled", true);
+ manualRefresh();
+ setTimeout(function() {
+ $("#refreshButton").text("Refresh Data");
+ $("#refreshButton").prop("disabled", false);
+ }, 5000);
+ });
+
+ // Force a data refresh when the page loads
+ manualRefresh();
+
+ // Add emergency refresh button functionality
+ $("#forceRefreshBtn").show().on("click", function() {
+ $(this).text("Refreshing...");
+ $(this).prop("disabled", true);
+
+ $.ajax({
+ url: '/api/force-refresh',
+ method: 'POST',
+ timeout: 15000,
+ success: function(data) {
+ console.log("Force refresh successful:", data);
+ manualRefresh(); // Immediately get the new data
+ $("#forceRefreshBtn").text("Force Refresh").prop("disabled", false);
+ },
+ error: function(xhr, status, error) {
+ console.error("Force refresh failed:", error);
+ $("#forceRefreshBtn").text("Force Refresh").prop("disabled", false);
+ alert("Refresh failed: " + error);
+ }
+ });
+ });
+
+ // Add stale data detection
+ setInterval(function() {
+ if (latestMetrics && latestMetrics.server_timestamp) {
+ const lastUpdate = new Date(latestMetrics.server_timestamp);
+ const timeSinceUpdate = Math.floor((Date.now() - lastUpdate.getTime()) / 1000);
+ if (timeSinceUpdate > 120) { // More than 2 minutes
+ showConnectionIssue(`Data stale (${timeSinceUpdate}s old). Use Force Refresh.`);
+ $("#forceRefreshBtn").show();
+ }
+ }
+ }, 30000); // Check every 30 seconds
+});
\ No newline at end of file
diff --git a/static/js/retro-refresh.js b/static/js/retro-refresh.js
new file mode 100644
index 0000000..825737c
--- /dev/null
+++ b/static/js/retro-refresh.js
@@ -0,0 +1,238 @@
+// This script integrates the retro floating refresh bar
+// with the existing dashboard and workers page functionality
+
+(function() {
+ // Wait for DOM to be ready
+ document.addEventListener('DOMContentLoaded', function() {
+ // Create the retro terminal bar if it doesn't exist yet
+ if (!document.getElementById('retro-terminal-bar')) {
+ createRetroTerminalBar();
+ }
+
+ // Hide the original refresh container
+ const originalRefreshUptime = document.getElementById('refreshUptime');
+ if (originalRefreshUptime) {
+ originalRefreshUptime.style.visibility = 'hidden';
+ originalRefreshUptime.style.height = '0';
+ originalRefreshUptime.style.overflow = 'hidden';
+
+ // Important: We keep the original elements and just hide them
+ // This ensures all existing JavaScript functions still work
+ }
+
+ // Add extra space at the bottom of the page to prevent the floating bar from covering content
+ const extraSpace = document.createElement('div');
+ extraSpace.style.height = '100px';
+ document.body.appendChild(extraSpace);
+ });
+
+ // Function to create the retro terminal bar
+ function createRetroTerminalBar() {
+ // Minimal floating-bar markup; the ids below must match the selectors this
+ // script observes (#bitcoin-progress-inner, #progress-text, #uptimeTimer,
+ // #data-refresh-time) and the toggle/hide handlers exposed on window.
+ const html = `
+ <div id="retro-terminal-bar">
+ <div class="terminal-header">
+ <span id="data-refresh-time">00:00:00</span>
+ <button class="terminal-button" onclick="toggleTerminal()">_</button>
+ <button class="terminal-button" onclick="hideTerminal()">X</button>
+ </div>
+ <div class="terminal-body">
+ <div id="bitcoin-progress-container">
+ <div id="bitcoin-progress-inner"></div>
+ </div>
+ <div class="progress-ticks">
+ <span>0s</span>
+ <span>15s</span>
+ <span>30s</span>
+ <span>45s</span>
+ <span>60s</span>
+ </div>
+ <div id="progress-text">60s to next update</div>
+ <div id="uptimeTimer">Uptime: 0h 0m 0s</div>
+ </div>
+ </div>
+ `;
+
+ // Create a container for the HTML
+ const container = document.createElement('div');
+ container.innerHTML = html;
+
+ // Append to the body
+ document.body.appendChild(container.firstElementChild);
+
+ // Start the clock update
+ updateTerminalClock();
+ setInterval(updateTerminalClock, 1000);
+
+ // Check if terminal should be collapsed based on previous state
+ const isCollapsed = localStorage.getItem('terminalCollapsed') === 'true';
+ if (isCollapsed) {
+ document.getElementById('retro-terminal-bar').classList.add('collapsed');
+ }
+ }
+
+ // Function to update the terminal clock
+ function updateTerminalClock() {
+ const clockElement = document.getElementById('data-refresh-time');
+ if (clockElement) {
+ const now = new Date();
+ const hours = String(now.getHours()).padStart(2, '0');
+ const minutes = String(now.getMinutes()).padStart(2, '0');
+ const seconds = String(now.getSeconds()).padStart(2, '0');
+ clockElement.textContent = `${hours}:${minutes}:${seconds}`;
+ }
+ }
+
+ // Expose these functions globally for the onclick handlers
+ window.toggleTerminal = function() {
+ const terminal = document.getElementById('retro-terminal-bar');
+ terminal.classList.toggle('collapsed');
+
+ // Store state in localStorage
+ localStorage.setItem('terminalCollapsed', terminal.classList.contains('collapsed'));
+ };
+
+ window.hideTerminal = function() {
+ document.getElementById('retro-terminal-bar').style.display = 'none';
+
+ // Create a show button that appears at the bottom right
+ const showButton = document.createElement('button');
+ showButton.id = 'show-terminal-button';
+ showButton.textContent = 'Show Monitor';
+ showButton.style.position = 'fixed';
+ showButton.style.bottom = '10px';
+ showButton.style.right = '10px';
+ showButton.style.zIndex = '1000';
+ showButton.style.backgroundColor = '#f7931a';
+ showButton.style.color = '#000';
+ showButton.style.border = 'none';
+ showButton.style.padding = '8px 12px';
+ showButton.style.cursor = 'pointer';
+ showButton.style.fontFamily = "'VT323', monospace";
+ showButton.style.fontSize = '14px';
+ showButton.onclick = function() {
+ document.getElementById('retro-terminal-bar').style.display = 'block';
+ this.remove();
+ };
+ document.body.appendChild(showButton);
+ };
+
+ // Redirect original progress bar updates to our new floating bar
+ // This Observer will listen for changes to the original #bitcoin-progress-inner
+ // and replicate them to our new floating bar version
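+ // Using an observer (rather than patching the dashboard's update functions)
+ // keeps this script drop-in: the original code stays untouched and the
+ // floating bar simply mirrors whatever the hidden elements display.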
+ const initProgressObserver = function() {
+ // Setup a MutationObserver to watch for style changes on the original progress bar
+ const originalProgressBar = document.querySelector('#refreshUptime #bitcoin-progress-inner');
+ const newProgressBar = document.querySelector('#retro-terminal-bar #bitcoin-progress-inner');
+
+ if (originalProgressBar && newProgressBar) {
+ const observer = new MutationObserver(function(mutations) {
+ mutations.forEach(function(mutation) {
+ if (mutation.attributeName === 'style') {
+ // Get the width from the original progress bar
+ const width = originalProgressBar.style.width;
+ if (width) {
+ // Apply it to our new progress bar
+ newProgressBar.style.width = width;
+
+ // Also copy any classes (like glow-effect)
+ if (originalProgressBar.classList.contains('glow-effect') &&
+ !newProgressBar.classList.contains('glow-effect')) {
+ newProgressBar.classList.add('glow-effect');
+ } else if (!originalProgressBar.classList.contains('glow-effect') &&
+ newProgressBar.classList.contains('glow-effect')) {
+ newProgressBar.classList.remove('glow-effect');
+ }
+
+ // Copy waiting-for-update class
+ if (originalProgressBar.classList.contains('waiting-for-update') &&
+ !newProgressBar.classList.contains('waiting-for-update')) {
+ newProgressBar.classList.add('waiting-for-update');
+ } else if (!originalProgressBar.classList.contains('waiting-for-update') &&
+ newProgressBar.classList.contains('waiting-for-update')) {
+ newProgressBar.classList.remove('waiting-for-update');
+ }
+ }
+ }
+ });
+ });
+
+ // Start observing
+ observer.observe(originalProgressBar, { attributes: true });
+ }
+
+ // Also watch for changes to the progress text
+ const originalProgressText = document.querySelector('#refreshUptime #progress-text');
+ const newProgressText = document.querySelector('#retro-terminal-bar #progress-text');
+
+ if (originalProgressText && newProgressText) {
+ const textObserver = new MutationObserver(function(mutations) {
+ mutations.forEach(function(mutation) {
+ if (mutation.type === 'childList') {
+ // Update the text in our new bar
+ newProgressText.textContent = originalProgressText.textContent;
+ }
+ });
+ });
+
+ // Start observing
+ textObserver.observe(originalProgressText, { childList: true, subtree: true });
+ }
+
+ // Watch for changes to the uptime timer
+ const originalUptimeTimer = document.querySelector('#refreshUptime #uptimeTimer');
+ const newUptimeTimer = document.querySelector('#retro-terminal-bar #uptimeTimer');
+
+ if (originalUptimeTimer && newUptimeTimer) {
+ const uptimeObserver = new MutationObserver(function(mutations) {
+ mutations.forEach(function(mutation) {
+ if (mutation.type === 'childList') {
+ // Update the text in our new bar
+ newUptimeTimer.innerHTML = originalUptimeTimer.innerHTML;
+ }
+ });
+ });
+
+ // Start observing
+ uptimeObserver.observe(originalUptimeTimer, { childList: true, subtree: true });
+ }
+ };
+
+ // Start the observer once the page is fully loaded
+ window.addEventListener('load', function() {
+ // Give a short delay to ensure all elements are rendered
+ setTimeout(initProgressObserver, 500);
+ });
+})();
\ No newline at end of file
diff --git a/static/js/workers.js b/static/js/workers.js
new file mode 100644
index 0000000..1d9287b
--- /dev/null
+++ b/static/js/workers.js
@@ -0,0 +1,641 @@
+"use strict";
+
+// Global variables for workers dashboard
+let workerData = null;
+let refreshTimer;
+let pageLoadTime = Date.now();
+let currentProgress = 0;
+const PROGRESS_MAX = 60; // 60 seconds for a complete cycle
+let lastUpdateTime = Date.now();
+let filterState = {
+ currentFilter: 'all',
+ searchTerm: ''
+};
+let miniChart = null;
+let connectionRetryCount = 0;
+
+// Server time variables for uptime calculation - synced with main dashboard
+let serverTimeOffset = 0;
+let serverStartTime = null;
+
+// New variable to track custom refresh timing
+let lastManualRefreshTime = 0;
+const MIN_REFRESH_INTERVAL = 10000; // Minimum 10 seconds between refreshes
+
+// Initialize the page
+$(document).ready(function() {
+ // Set up initial UI
+ initializePage();
+
+ // Get server time for uptime calculation
+ updateServerTime();
+
+ // Set up refresh synchronization with main dashboard
+ setupRefreshSync();
+
+ // Fetch worker data immediately on page load
+ fetchWorkerData();
+
+ // Set up refresh timer
+ setInterval(updateProgressBar, 1000);
+
+ // Set up uptime timer - synced with main dashboard
+ setInterval(updateUptime, 1000);
+
+ // Start server time polling - same as main dashboard
+ setInterval(updateServerTime, 30000);
+
+ // Auto-refresh worker data - aligned with main dashboard if possible
+ setInterval(function() {
+ // Check if it's been at least PROGRESS_MAX seconds since last update
+ const timeSinceLastUpdate = Date.now() - lastUpdateTime;
+ if (timeSinceLastUpdate >= PROGRESS_MAX * 1000) {
+ // Check if there was a recent manual refresh
+ const timeSinceManualRefresh = Date.now() - lastManualRefreshTime;
+ if (timeSinceManualRefresh >= MIN_REFRESH_INTERVAL) {
+ console.log("Auto-refresh triggered after time interval");
+ fetchWorkerData();
+ }
+ }
+ }, 10000); // Check every 10 seconds to align better with main dashboard
+
+ // Set up filter button click handlers
+ $('.filter-button').click(function() {
+ $('.filter-button').removeClass('active');
+ $(this).addClass('active');
+ filterState.currentFilter = $(this).data('filter');
+ filterWorkers();
+ });
+
+ // Set up search input handler
+ $('#worker-search').on('input', function() {
+ filterState.searchTerm = $(this).val().toLowerCase();
+ filterWorkers();
+ });
+});
+
+// Set up refresh synchronization with main dashboard
+function setupRefreshSync() {
+ // Listen for storage events (triggered by main dashboard)
+ window.addEventListener('storage', function(event) {
+ // Check if this is our dashboard refresh event
+ if (event.key === 'dashboardRefreshEvent') {
+ console.log("Detected dashboard refresh event");
+
+ // Prevent too frequent refreshes
+ const now = Date.now();
+ const timeSinceLastRefresh = now - lastUpdateTime;
+
+ if (timeSinceLastRefresh >= MIN_REFRESH_INTERVAL) {
+ console.log("Syncing refresh with main dashboard");
+ // Reset progress bar and immediately fetch
+ resetProgressBar();
+ // Refresh the worker data
+ fetchWorkerData();
+ } else {
+ console.log("Skipping too-frequent refresh", timeSinceLastRefresh);
+ // Just reset the progress bar to match main dashboard
+ resetProgressBar();
+ }
+ }
+ });
+
+ // On page load, check if we should align with main dashboard timing
+ try {
+ const lastDashboardRefresh = localStorage.getItem('dashboardRefreshTime');
+ if (lastDashboardRefresh) {
+ const lastRefreshTime = parseInt(lastDashboardRefresh);
+ const timeSinceLastDashboardRefresh = Date.now() - lastRefreshTime;
+
+ // If main dashboard refreshed recently, adjust our timer
+ if (timeSinceLastDashboardRefresh < PROGRESS_MAX * 1000) {
+ console.log("Adjusting timer to align with main dashboard");
+ currentProgress = Math.floor(timeSinceLastDashboardRefresh / 1000);
+ updateProgressBar(currentProgress);
+
+ // Calculate when next update will happen (roughly 60 seconds from last dashboard refresh)
+ const timeUntilNextRefresh = (PROGRESS_MAX * 1000) - timeSinceLastDashboardRefresh;
+
+ // Schedule a one-time check near the expected refresh time
+ if (timeUntilNextRefresh > 0) {
+ console.log(`Scheduling coordinated refresh in ${Math.floor(timeUntilNextRefresh/1000)} seconds`);
+ setTimeout(function() {
+ // Check if a refresh happened in the last few seconds via localStorage event
+ const newLastRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
+ const secondsSinceLastRefresh = (Date.now() - newLastRefresh) / 1000;
+
+ // If dashboard hasn't refreshed in the last 5 seconds, do our own refresh
+ if (secondsSinceLastRefresh > 5) {
+ console.log("Coordinated refresh time reached, fetching data");
+ fetchWorkerData();
+ } else {
+ console.log("Dashboard already refreshed recently, skipping coordinated refresh");
+ }
+ }, timeUntilNextRefresh);
+ }
+ }
+ }
+ } catch (e) {
+ console.error("Error reading dashboard refresh time:", e);
+ }
+
+ // Check for dashboard refresh periodically
+ setInterval(function() {
+ try {
+ const lastDashboardRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
+ const now = Date.now();
+ const timeSinceLastRefresh = (now - lastUpdateTime) / 1000;
+ const timeSinceDashboardRefresh = (now - lastDashboardRefresh) / 1000;
+
+ // If dashboard refreshed more recently than we did and we haven't refreshed in at least 10 seconds
+ if (lastDashboardRefresh > lastUpdateTime && timeSinceLastRefresh > 10) {
+ console.log("Catching up with dashboard refresh");
+ resetProgressBar();
+ fetchWorkerData();
+ }
+ } catch (e) {
+ console.error("Error in periodic dashboard check:", e);
+ }
+ }, 5000); // Check every 5 seconds
+}
+
+// Server time update via polling - same as main.js
+function updateServerTime() {
+ $.ajax({
+ url: "/api/time",
+ method: "GET",
+ timeout: 5000,
+ success: function(data) {
+ serverTimeOffset = new Date(data.server_timestamp).getTime() - Date.now();
+ serverStartTime = new Date(data.server_start_time).getTime();
+ },
+ error: function(jqXHR, textStatus, errorThrown) {
+ console.error("Error fetching server time:", textStatus, errorThrown);
+ }
+ });
+}
+
+// Update uptime display - synced with main dashboard
+function updateUptime() {
+ if (serverStartTime) {
+ const currentServerTime = Date.now() + serverTimeOffset;
+ const diff = currentServerTime - serverStartTime;
+ const hours = Math.floor(diff / (1000 * 60 * 60));
+ const minutes = Math.floor((diff % (1000 * 60 * 60)) / (1000 * 60));
+ const seconds = Math.floor((diff % (1000 * 60)) / 1000);
+ $("#uptimeTimer").html("Uptime: " + hours + "h " + minutes + "m " + seconds + "s");
+ }
+}
+
+// Initialize page elements
+function initializePage() {
+ // Initialize mini chart for total hashrate if the element exists
+ if (document.getElementById('total-hashrate-chart')) {
+ initializeMiniChart();
+ }
+
+ // Show loading state
+ $('#worker-grid').html('<div class="loading-message">Loading worker data...</div>');
+
+ // Add retry button (hidden by default)
+ if (!$('#retry-button').length) {
+ $('body').append('<button id="retry-button" style="display: none;">Retry Loading Data</button>');
+
+ $('#retry-button').on('click', function() {
+ $(this).text('Retrying...').prop('disabled', true);
+ fetchWorkerData(true);
+ setTimeout(() => {
+ $('#retry-button').text('Retry Loading Data').prop('disabled', false);
+ }, 3000);
+ });
+ }
+}
+
+// Fetch worker data from API
+function fetchWorkerData(forceRefresh = false) {
+ // Track this as a manual refresh for throttling purposes
+ lastManualRefreshTime = Date.now();
+
+ $('#worker-grid').addClass('loading-fade');
+
+ // Update progress bar to show data is being fetched
+ resetProgressBar();
+
+ // Choose API URL based on whether we're forcing a refresh
+ const apiUrl = `/api/workers${forceRefresh ? '?force=true' : ''}`;
+
+ $.ajax({
+ url: apiUrl,
+ method: 'GET',
+ dataType: 'json',
+ timeout: 15000, // 15 second timeout
+ success: function(data) {
+ workerData = data;
+ lastUpdateTime = Date.now();
+
+ // Update UI with new data
+ updateWorkerGrid();
+ updateSummaryStats();
+ updateMiniChart();
+ updateLastUpdated();
+
+ // Hide retry button
+ $('#retry-button').hide();
+
+ // Reset connection retry count
+ connectionRetryCount = 0;
+
+ console.log("Worker data updated successfully");
+ },
+ error: function(xhr, status, error) {
+ console.error("Error fetching worker data:", error);
+
+ // Show error in worker grid
+ $('#worker-grid').html(`
+ <div class="error-message">
+ Error loading worker data: ${error || 'Unknown error'}
+ </div>
+ `);
+
+ // Show retry button
+ $('#retry-button').show();
+
+ // Implement exponential backoff for automatic retry
+ connectionRetryCount++;
+ const delay = Math.min(30000, 1000 * Math.pow(1.5, Math.min(5, connectionRetryCount)));
+ console.log(`Will retry in ${delay/1000} seconds (attempt ${connectionRetryCount})`);
+
+ setTimeout(() => {
+ fetchWorkerData(true); // Force refresh on retry
+ }, delay);
+ },
+ complete: function() {
+ $('#worker-grid').removeClass('loading-fade');
+ }
+ });
+}
+
+// Update the worker grid with data
+// UPDATED FUNCTION
+function updateWorkerGrid() {
+ if (!workerData || !workerData.workers) {
+ console.error("No worker data available");
+ return;
+ }
+
+ const workerGrid = $('#worker-grid');
+ workerGrid.empty();
+
+ // Apply current filters before rendering
+ const filteredWorkers = filterWorkersData(workerData.workers);
+
+ if (filteredWorkers.length === 0) {
+ workerGrid.html(`
+ <div class="no-workers-message">
+ No workers match your filter criteria
+ </div>
+ `);
+ return;
+ }
+
+ // Calculate total unpaid earnings (from the dashboard)
+ const totalUnpaidEarnings = workerData.total_earnings || 0;
+
+ // Sum up hashrates of online workers to calculate share percentages
+ const totalHashrate = workerData.workers
+ .filter(w => w.status === 'online')
+ .reduce((sum, w) => sum + parseFloat(w.hashrate_3hr || 0), 0);
+
+ // Calculate share percentage for each worker
+ const onlineWorkers = workerData.workers.filter(w => w.status === 'online');
+ const offlineWorkers = workerData.workers.filter(w => w.status === 'offline');
+
+ // Allocate 95% to online workers, 5% to offline workers
+ const onlinePool = totalUnpaidEarnings * 0.95;
+ const offlinePool = totalUnpaidEarnings * 0.05;
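+ // e.g. with 0.010 BTC unpaid: 0.0095 BTC is split among online workers in
+ // proportion to their 3hr hashrate, and 0.0005 BTC evenly across offline ones.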
+
+ // Generate worker cards
+ filteredWorkers.forEach(worker => {
+ // Calculate earnings share based on hashrate proportion
+ let earningsDisplay = worker.earnings;
+
+ // Explicitly recalculate earnings share for display consistency
+ if (worker.status === 'online' && totalHashrate > 0) {
+ const hashrateShare = parseFloat(worker.hashrate_3hr || 0) / totalHashrate;
+ earningsDisplay = (onlinePool * hashrateShare).toFixed(8);
+ } else if (worker.status === 'offline' && offlineWorkers.length > 0) {
+ earningsDisplay = (offlinePool / offlineWorkers.length).toFixed(8);
+ }
+
+ // Create worker card
+ const card = $('<div class="worker-card"></div>');
+
+ // Add class based on status
+ if (worker.status === 'online') {
+ card.addClass('worker-card-online');
+ } else {
+ card.addClass('worker-card-offline');
+ }
+
+ // Add worker type badge
+ card.append(`<div class="worker-type">${worker.type}</div>`);
+
+ // Add worker name
+ card.append(`<div class="worker-name">${worker.name}</div>`);
+
+ // Add status badge
+ if (worker.status === 'online') {
+ card.append('<div class="status-badge online">ONLINE</div>');
+ } else {
+ card.append('<div class="status-badge offline">OFFLINE</div>');
+ }
+
+ // Add hashrate bar
+ const maxHashrate = 200; // TH/s - adjust based on your fleet
+ const hashratePercent = Math.min(100, (worker.hashrate_3hr / maxHashrate) * 100);
+ card.append(`
+ <div class="worker-stats-row">
+ <div class="worker-stats-label">Hashrate (3hr):</div>
+ <div class="worker-stats-value">${worker.hashrate_3hr} ${worker.hashrate_3hr_unit}</div>
+ <div class="stats-bar-container">
+ <div class="stats-bar" style="width: ${hashratePercent}%"></div>
+ </div>
+ </div>
+ `);
+
+ // Add additional stats - NOTE: Using recalculated earnings
+ card.append(`
+ <div class="worker-stats">
+ <div class="worker-stats-row">
+ <div class="worker-stats-label">Last Share:</div>
+ <div class="worker-stats-value">${worker.last_share.split(' ')[1]}</div>
+ </div>
+ <div class="worker-stats-row">
+ <div class="worker-stats-label">Earnings:</div>
+ <div class="worker-stats-value">${earningsDisplay}</div>
+ </div>
+ <div class="worker-stats-row">
+ <div class="worker-stats-label">Accept Rate:</div>
+ <div class="worker-stats-value">${worker.acceptance_rate}%</div>
+ </div>
+ <div class="worker-stats-row">
+ <div class="worker-stats-label">Temp:</div>
+ <div class="worker-stats-value">${worker.temperature > 0 ? worker.temperature + '°C' : 'N/A'}</div>
+ </div>
+ </div>
+ `);
+
+ // Add card to grid
+ workerGrid.append(card);
+ });
+
+ // Verify the sum of displayed earnings equals the total
+ console.log(`Total unpaid earnings: ${totalUnpaidEarnings} BTC`);
+ console.log(`Sum of worker displayed earnings: ${
+ filteredWorkers.reduce((sum, w) => {
+ if (w.status === 'online' && totalHashrate > 0) {
+ const hashrateShare = parseFloat(w.hashrate_3hr || 0) / totalHashrate;
+ return sum + parseFloat((onlinePool * hashrateShare).toFixed(8));
+ } else if (w.status === 'offline' && offlineWorkers.length > 0) {
+ return sum + parseFloat((offlinePool / offlineWorkers.length).toFixed(8));
+ }
+ return sum;
+ }, 0)
+ } BTC`);
+}
+
+// Filter worker data based on current filter state
+function filterWorkersData(workers) {
+ if (!workers) return [];
+
+ return workers.filter(worker => {
+ const workerName = worker.name.toLowerCase();
+ const isOnline = worker.status === 'online';
+ const workerType = worker.type.toLowerCase();
+
+ // Check if worker matches filter
+ let matchesFilter = false;
+ if (filterState.currentFilter === 'all') {
+ matchesFilter = true;
+ } else if (filterState.currentFilter === 'online' && isOnline) {
+ matchesFilter = true;
+ } else if (filterState.currentFilter === 'offline' && !isOnline) {
+ matchesFilter = true;
+ } else if (filterState.currentFilter === 'asic' && workerType === 'asic') {
+ matchesFilter = true;
+ } else if (filterState.currentFilter === 'fpga' && workerType === 'fpga') {
+ matchesFilter = true;
+ }
+
+ // Check if worker matches search term
+ const matchesSearch = workerName.includes(filterState.searchTerm);
+
+ return matchesFilter && matchesSearch;
+ });
+}
+
+// Apply filter to rendered worker cards
+function filterWorkers() {
+ if (!workerData || !workerData.workers) return;
+
+ // Re-render the worker grid with current filters
+ updateWorkerGrid();
+}
+
+// Modified updateSummaryStats function for workers.js
+function updateSummaryStats() {
+ if (!workerData) return;
+
+ // Update worker counts
+ $('#workers-count').text(workerData.workers_total || 0);
+ $('#workers-online').text(workerData.workers_online || 0);
+ $('#workers-offline').text(workerData.workers_offline || 0);
+
+ // Update worker ring percentage
+ const onlinePercent = workerData.workers_total > 0 ?
+ workerData.workers_online / workerData.workers_total : 0;
+ $('.worker-ring').css('--online-percent', onlinePercent);
+
+ // IMPORTANT: Update total hashrate using EXACT format matching main dashboard
+ // This ensures the displayed value matches exactly what's on the main page
+ if (workerData.total_hashrate !== undefined) {
+ // Format with exactly 1 decimal place - matches main dashboard format
+ const formattedHashrate = Number(workerData.total_hashrate).toFixed(1);
+ $('#total-hashrate').text(`${formattedHashrate} ${workerData.hashrate_unit || 'TH/s'}`);
+ } else {
+ $('#total-hashrate').text(`0.0 ${workerData.hashrate_unit || 'TH/s'}`);
+ }
+
+ // Update other summary stats
+ $('#total-earnings').text(`${(workerData.total_earnings || 0).toFixed(8)} BTC`);
+ $('#daily-sats').text(`${numberWithCommas(workerData.daily_sats || 0)} sats`);
+ $('#avg-acceptance-rate').text(`${(workerData.avg_acceptance_rate || 0).toFixed(2)}%`);
+}
+
+// Initialize mini chart
+function initializeMiniChart() {
+ const ctx = document.getElementById('total-hashrate-chart').getContext('2d');
+
+ // Generate some sample data to start
+ const labels = Array(24).fill('').map((_, i) => i);
+ const data = [750, 760, 755, 770, 780, 775, 760, 765, 770, 775, 780, 790, 785, 775, 770, 765, 780, 785, 775, 770, 775, 780, 775, 774.8];
+
+ miniChart = new Chart(ctx, {
+ type: 'line',
+ data: {
+ labels: labels,
+ datasets: [{
+ data: data,
+ borderColor: '#1137F5',
+ backgroundColor: 'rgba(57, 255, 20, 0.1)',
+ fill: true,
+ tension: 0.3,
+ borderWidth: 1.5,
+ pointRadius: 0
+ }]
+ },
+ options: {
+ responsive: true,
+ maintainAspectRatio: false,
+ scales: {
+ x: { display: false },
+ y: {
+ display: false,
+ min: Math.min(...data) * 0.9,
+ max: Math.max(...data) * 1.1
+ }
+ },
+ plugins: {
+ legend: { display: false },
+ tooltip: { enabled: false }
+ },
+ animation: false,
+ elements: {
+ line: {
+ tension: 0.4
+ }
+ }
+ }
+ });
+}
+
+// Update mini chart with real data
+function updateMiniChart() {
+ if (!miniChart || !workerData || !workerData.hashrate_history) return;
+
+ // Extract hashrate data from history
+ const historyData = workerData.hashrate_history;
+ if (!historyData || historyData.length === 0) return;
+
+ // Get the values for the chart
+ const values = historyData.map(item => parseFloat(item.value) || 0);
+ const labels = historyData.map(item => item.time);
+
+ // Update chart data
+ miniChart.data.labels = labels;
+ miniChart.data.datasets[0].data = values;
+
+ // Update y-axis range
+ const min = Math.min(...values);
+ const max = Math.max(...values);
+ miniChart.options.scales.y.min = min * 0.9;
+ miniChart.options.scales.y.max = max * 1.1;
+
+ // Update the chart
+ miniChart.update('none');
+}
+
+// Update progress bar
+function updateProgressBar() {
+ if (currentProgress < PROGRESS_MAX) {
+ currentProgress++;
+ const progressPercent = (currentProgress / PROGRESS_MAX) * 100;
+ $("#bitcoin-progress-inner").css("width", progressPercent + "%");
+
+ // Add glowing effect when close to completion
+ if (progressPercent > 80) {
+ $("#bitcoin-progress-inner").addClass("glow-effect");
+ } else {
+ $("#bitcoin-progress-inner").removeClass("glow-effect");
+ }
+
+ // Update remaining seconds text
+ let remainingSeconds = PROGRESS_MAX - currentProgress;
+ if (remainingSeconds <= 0) {
+ $("#progress-text").text("Waiting for update...");
+ $("#bitcoin-progress-inner").addClass("waiting-for-update");
+ } else {
+ $("#progress-text").text(remainingSeconds + "s to next update");
+ $("#bitcoin-progress-inner").removeClass("waiting-for-update");
+ }
+
+ // Check for main dashboard refresh near the end to ensure sync
+ if (currentProgress >= 55) { // When we're getting close to refresh time
+ try {
+ const lastDashboardRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
+ const secondsSinceDashboardRefresh = (Date.now() - lastDashboardRefresh) / 1000;
+
+ // If main dashboard just refreshed (within last 5 seconds)
+ if (secondsSinceDashboardRefresh <= 5) {
+ console.log("Detected recent dashboard refresh, syncing now");
+ resetProgressBar();
+ fetchWorkerData();
+ return;
+ }
+ } catch (e) {
+ console.error("Error checking dashboard refresh status:", e);
+ }
+ }
+ } else {
+ // Reset progress bar if it's time to refresh
+ // But first check if the main dashboard refreshed recently
+ try {
+ const lastDashboardRefresh = parseInt(localStorage.getItem('dashboardRefreshTime') || '0');
+ const secondsSinceDashboardRefresh = (Date.now() - lastDashboardRefresh) / 1000;
+
+ // If dashboard refreshed in the last 10 seconds, wait for it instead of refreshing ourselves
+ if (secondsSinceDashboardRefresh < 10) {
+ console.log("Waiting for dashboard refresh event instead of refreshing independently");
+ return;
+ }
+ } catch (e) {
+ console.error("Error checking dashboard refresh status:", e);
+ }
+
+ // If main dashboard hasn't refreshed recently, do our own refresh
+ if (Date.now() - lastUpdateTime > PROGRESS_MAX * 1000) {
+ console.log("Progress bar expired, fetching data");
+ fetchWorkerData();
+ }
+ }
+}
+
+// Reset progress bar
+function resetProgressBar() {
+ currentProgress = 0;
+ $("#bitcoin-progress-inner").css("width", "0%");
+ $("#bitcoin-progress-inner").removeClass("glow-effect");
+ $("#bitcoin-progress-inner").removeClass("waiting-for-update");
+ $("#progress-text").text(PROGRESS_MAX + "s to next update");
+}
+
+// Update the last updated timestamp
+function updateLastUpdated() {
+ if (!workerData || !workerData.timestamp) return;
+
+ try {
+ const timestamp = new Date(workerData.timestamp);
+ $("#lastUpdated").html("Last Updated: " +
+ timestamp.toLocaleString() + " ");
+ } catch (e) {
+ console.error("Error formatting timestamp:", e);
+ }
+}
+
+// Format numbers with commas
+function numberWithCommas(x) {
+ if (x == null) return "N/A";
+ return x.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
+}
\ No newline at end of file
diff --git a/templates/base.html b/templates/base.html
new file mode 100644
index 0000000..27e81fb
--- /dev/null
+++ b/templates/base.html
@@ -0,0 +1,90 @@
+
+
+
+
+
+ {% block title %}Ocean.xyz Pool Mining Dashboard{% endblock %}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {% block css %}{% endblock %}
+
+
+
+
+
+
+
+
Made by @DJO₿leezy
+
+
+
+ {% block last_updated %}
+
Last Updated: {{ current_time }}
+ {% endblock %}
+
+ {% block navigation %}
+
+ {% endblock %}
+
+
+ {% block content %}{% endblock %}
+
+
+ {% block refresh_bar %}
+
+ {% endblock %}
+
+
+ {% block congrats_message %}
+
+ {% endblock %}
+
+
+
+
+
+
+
+
+ {% block javascript %}{% endblock %}
+
+
+
+
+
diff --git a/templates/boot.html b/templates/boot.html
new file mode 100644
index 0000000..79857a3
--- /dev/null
+++ b/templates/boot.html
@@ -0,0 +1,369 @@
+
+
+
+
+
+ Ocean.xyz Pool Miner - Initializing...
+
+
+
+
+ SKIP
+
+ Loading mining data...
+
+██████╗ ████████╗ ██████╗ ██████╗ ███████╗
+██╔══██╗╚══██╔══╝██╔════╝ ██╔═══██╗██╔════╝
+██████╔╝ ██║ ██║ ██║ ██║███████╗
+██╔══██╗ ██║ ██║ ██║ ██║╚════██║
+██████╔╝ ██║ ╚██████╗ ╚██████╔╝███████║
+╚═════╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝
+ v.21
+
+
+
+
+
+ Initialize mining dashboard? [Y/N]:
+
+
+
+
+
+
+
+
+
+
diff --git a/templates/dashboard.html b/templates/dashboard.html
new file mode 100644
index 0000000..31d6380
--- /dev/null
+++ b/templates/dashboard.html
@@ -0,0 +1,363 @@
+{% extends "base.html" %}
+
+{% block title %}Ocean.xyz Pool Mining Dashboard v 0.2{% endblock %}
+
+{% block css %}
+
+{% endblock %}
+
+{% block dashboard_active %}active{% endblock %}
+
+{% block content %}
+
+
+
+
+
+
+
+
+
+
+
+
+ Status:
+
+ {% if metrics and metrics.workers_hashing and metrics.workers_hashing > 0 %}
+ ONLINE
+ {% else %}
+ OFFLINE
+ {% endif %}
+
+
+
+ Workers Hashing:
+ {{ metrics.workers_hashing or 0 }}
+
+
+
+ Last Share:
+ {{ metrics.total_last_share or "N/A" }}
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Pool Total Hashrate:
+
+ {% if metrics and metrics.pool_total_hashrate and metrics.pool_total_hashrate_unit %}
+ {{ metrics.pool_total_hashrate }} {{ metrics.pool_total_hashrate_unit[:-2]|upper ~ metrics.pool_total_hashrate_unit[-2:] }}
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+
+ 24hr Avg Hashrate:
+
+ {% if metrics and metrics.hashrate_24hr %}
+ {{ metrics.hashrate_24hr }}
+ {% if metrics.hashrate_24hr_unit %}
+ {{ metrics.hashrate_24hr_unit[:-2]|upper ~ metrics.hashrate_24hr_unit[-2:] }}
+ {% else %}
+ TH/s
+ {% endif %}
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+ 3hr Avg Hashrate:
+
+ {% if metrics and metrics.hashrate_3hr %}
+ {{ metrics.hashrate_3hr }}
+ {% if metrics.hashrate_3hr_unit %}
+ {{ metrics.hashrate_3hr_unit[:-2]|upper ~ metrics.hashrate_3hr_unit[-2:] }}
+ {% else %}
+ TH/s
+ {% endif %}
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+ 10min Avg Hashrate:
+
+ {% if metrics and metrics.hashrate_10min %}
+ {{ metrics.hashrate_10min }}
+ {% if metrics.hashrate_10min_unit %}
+ {{ metrics.hashrate_10min_unit[:-2]|upper ~ metrics.hashrate_10min_unit[-2:] }}
+ {% else %}
+ TH/s
+ {% endif %}
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+ 60sec Avg Hashrate:
+
+ {% if metrics and metrics.hashrate_60sec %}
+ {{ metrics.hashrate_60sec }}
+ {% if metrics.hashrate_60sec_unit %}
+ {{ metrics.hashrate_60sec_unit[:-2]|upper ~ metrics.hashrate_60sec_unit[-2:] }}
+ {% else %}
+ TH/s
+ {% endif %}
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+
+
+
+
+
+
+
+
+ Block Number:
+
+ {% if metrics and metrics.block_number %}
+ {{ metrics.block_number|commafy }}
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+ BTC Price:
+
+ {% if metrics and metrics.btc_price %}
+ ${{ "%.2f"|format(metrics.btc_price) }}
+ {% else %}
+ $0.00
+ {% endif %}
+
+
+
+
+ Network Hashrate:
+
+ {% if metrics and metrics.network_hashrate %}
+ {{ metrics.network_hashrate|round|commafy }} EH/s
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+ Difficulty:
+
+ {% if metrics and metrics.difficulty %}
+ {{ metrics.difficulty|round|commafy }}
+ {% else %}
+ N/A
+ {% endif %}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Daily Mined (Net):
+
+ {% if metrics and metrics.daily_mined_sats %}
+ {{ metrics.daily_mined_sats|commafy }} sats
+ {% else %}
+ 0 sats
+ {% endif %}
+
+
+
+
+ Monthly Mined (Net):
+
+ {% if metrics and metrics.monthly_mined_sats %}
+ {{ metrics.monthly_mined_sats|commafy }} sats
+ {% else %}
+ 0 sats
+ {% endif %}
+
+
+
+
+ Est. Earnings/Day:
+
+ {% if metrics and metrics.estimated_earnings_per_day_sats %}
+ {{ metrics.estimated_earnings_per_day_sats|commafy }} sats
+ {% else %}
+ 0 sats
+ {% endif %}
+
+
+
+
+ Est. Earnings/Block:
+
+ {% if metrics and metrics.estimated_earnings_next_block_sats %}
+ {{ metrics.estimated_earnings_next_block_sats|commafy }} sats
+ {% else %}
+ 0 sats
+ {% endif %}
+
+
+
+
+ Est. Rewards in Window:
+
+ {% if metrics and metrics.estimated_rewards_in_window_sats %}
+ {{ metrics.estimated_rewards_in_window_sats|commafy }} sats
+ {% else %}
+ 0 sats
+ {% endif %}
+
+
+
+
+
+
+
+
+
+
+
+
+ Daily Revenue:
+
+ {% if metrics and metrics.daily_revenue is defined and metrics.daily_revenue is not none %}
+ ${{ "%.2f"|format(metrics.daily_revenue) }}
+ {% else %}
+ $0.00
+ {% endif %}
+
+
+
+
+ Daily Power Cost:
+
+ {% if metrics and metrics.daily_power_cost is defined and metrics.daily_power_cost is not none %}
+ ${{ "%.2f"|format(metrics.daily_power_cost) }}
+ {% else %}
+ $0.00
+ {% endif %}
+
+
+
+
+ Daily Profit (USD):
+
+ {% if metrics and metrics.daily_profit_usd is defined and metrics.daily_profit_usd is not none %}
+ ${{ "%.2f"|format(metrics.daily_profit_usd) }}
+ {% else %}
+ $0.00
+ {% endif %}
+
+
+
+
+ Monthly Profit (USD):
+
+ {% if metrics and metrics.monthly_profit_usd is defined and metrics.monthly_profit_usd is not none %}
+ ${{ "%.2f"|format(metrics.monthly_profit_usd) }}
+ {% else %}
+ $0.00
+ {% endif %}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Unpaid Earnings:
+
+ {% if metrics and metrics.unpaid_earnings %}
+ {{ metrics.unpaid_earnings }} BTC
+ {% else %}
+ 0 BTC
+ {% endif %}
+
+
+
+
+ Last Block:
+
+ {{ metrics.last_block_height if metrics and metrics.last_block_height else "N/A" }}
+
+ —
+
+ {{ metrics.last_block_time if metrics and metrics.last_block_time else "N/A" }}
+
+ —
+
+ {% if metrics and metrics.last_block_earnings %}
+ +{{ metrics.last_block_earnings|int|commafy }} sats
+ {% else %}
+ +0 sats
+ {% endif %}
+
+
+
+
+ Est. Time to Payout:
+
+ {{ metrics.est_time_to_payout if metrics and metrics.est_time_to_payout else "N/A" }}
+
+
+
+
+ Blocks Found:
+
+ {{ metrics.blocks_found if metrics and metrics.blocks_found else "0" }}
+
+
+
+
+
+
+
+{% endblock %}
+
+{% block javascript %}
+
+
+{% endblock %}
diff --git a/templates/error.html b/templates/error.html
new file mode 100644
index 0000000..5ad2559
--- /dev/null
+++ b/templates/error.html
@@ -0,0 +1,22 @@
+
+
+
+
+
+ Error - Mining Dashboard
+
+
+
+
+
+
+
+
+
diff --git a/templates/workers.html b/templates/workers.html
new file mode 100644
index 0000000..5302f6f
--- /dev/null
+++ b/templates/workers.html
@@ -0,0 +1,106 @@
+{% extends "base.html" %}
+
+{% block title %}Workers Overview - Ocean.xyz Pool Mining Dashboard v 0.2{% endblock %}
+
+{% block css %}
+
+{% endblock %}
+
+{% block header %}Workers Overview{% endblock %}
+
+{% block workers_active %}active{% endblock %}
+
+{% block content %}
+
+
+
+
+
+
+
+
+
+
+ {{ workers_total }}
+
+
+
Workers
+
+ {{ workers_online }} /
+ {{ workers_offline }}
+
+
+
+
+
+ {% if total_hashrate is defined %}
+ {{ "%.1f"|format(total_hashrate) }} {{ hashrate_unit }}
+ {% else %}
+ N/A
+ {% endif %}
+
+
Total Hashrate
+
+
+
+
+
+
+
+ {% if total_earnings is defined %}
+ {{ "%.8f"|format(total_earnings) }} BTC
+ {% else %}
+ N/A
+ {% endif %}
+
+
Lifetime Earnings
+
+
+
+
+ {% if daily_sats is defined %}
+ {{ daily_sats|commafy }} sats
+ {% else %}
+ N/A
+ {% endif %}
+
+
Daily Sats
+
+
+
+
+ {% if avg_acceptance_rate is defined %}
+ {{ "%.2f"|format(avg_acceptance_rate) }}%
+ {% else %}
+ N/A
+ {% endif %}
+
+
Acceptance Rate
+
+
+
+
+
+
+
+
+
+
+
+ All Workers
+ Online
+ Offline
+ ASIC
+ FPGA
+
+
+
+
+
+
+
+{% endblock %}
+
+{% block javascript %}
+
+{% endblock %}
diff --git a/worker_service.py b/worker_service.py
new file mode 100644
index 0000000..06b62ab
--- /dev/null
+++ b/worker_service.py
@@ -0,0 +1,314 @@
+"""
+Worker service module for managing workers data.
+"""
+import logging
+import random
+from datetime import datetime, timedelta
+from zoneinfo import ZoneInfo
+
+class WorkerService:
+ """Service for generating and managing worker data."""
+
+ def __init__(self):
+ """Initialize the worker service."""
+ self.worker_data_cache = None
+ self.last_worker_data_update = None
+ self.WORKER_DATA_CACHE_TIMEOUT = 60 # Cache worker data for 60 seconds
+
+ def generate_default_workers_data(self):
+ """
+ Generate default worker data when no metrics are available.
+
+ Returns:
+ dict: Default worker data structure
+ """
+ return {
+ "workers": [],
+ "workers_total": 0,
+ "workers_online": 0,
+ "workers_offline": 0,
+ "total_hashrate": 0.0,
+ "hashrate_unit": "TH/s",
+ "total_earnings": 0.0,
+ "daily_sats": 0,
+ "avg_acceptance_rate": 0.0,
+ "hashrate_history": [],
+ "timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
+ }
+
+ def get_workers_data(self, cached_metrics, force_refresh=False):
+ """
+ Get worker data with caching for better performance.
+
+ Args:
+ cached_metrics (dict): Cached metrics from the dashboard
+ force_refresh (bool): Whether to force a refresh of cached data
+
+ Returns:
+ dict: Worker data
+ """
+ current_time = datetime.now().timestamp()
+
+ # Return cached data if it's still fresh and not forced to refresh
+ if not force_refresh and self.worker_data_cache and self.last_worker_data_update and \
+ (current_time - self.last_worker_data_update) < self.WORKER_DATA_CACHE_TIMEOUT:
+ logging.info("Using cached worker data")
+ return self.worker_data_cache
+
+ try:
+ # If metrics aren't available yet, return default data
+ if not cached_metrics:
+ return self.generate_default_workers_data()
+
+ # Check if we have workers_hashing information
+ workers_count = cached_metrics.get("workers_hashing", 0)
+ if workers_count <= 0:
+ return self.generate_default_workers_data()
+
+ # Get hashrate from cached metrics - using EXACT value
+ # Store this ORIGINAL value to ensure it's never changed in calculations
+ original_hashrate_3hr = float(cached_metrics.get("hashrate_3hr", 0) or 0)
+ hashrate_unit = cached_metrics.get("hashrate_3hr_unit", "TH/s")
+
+ # Generate worker data based on the number of active workers
+ workers_data = self.generate_workers_data(workers_count, original_hashrate_3hr, hashrate_unit)
+
+ # Calculate basic statistics
+ workers_online = len([w for w in workers_data if w['status'] == 'online'])
+ workers_offline = len(workers_data) - workers_online
+
+ # MODIFIED: Use unpaid_earnings from main dashboard instead of calculating from workers
+ unpaid_earnings = cached_metrics.get("unpaid_earnings", 0)
+ # Handle case where unpaid_earnings might be a string
+ if isinstance(unpaid_earnings, str):
+ try:
+ # Handle case where it might include "BTC" or other text
+ unpaid_earnings = float(unpaid_earnings.split()[0].replace(',', ''))
+ except (ValueError, IndexError):
+ unpaid_earnings = 0
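+ # e.g. "0.00123456 BTC" -> 0.00123456; non-numeric strings fall back to 0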
+
+ # Use unpaid_earnings as total_earnings
+ total_earnings = unpaid_earnings
+
+ # Debug log
+ logging.info(f"Using unpaid_earnings as total_earnings: {unpaid_earnings} BTC")
+
+ avg_acceptance_rate = sum([float(w.get('acceptance_rate', 0) or 0) for w in workers_data]) / len(workers_data) if workers_data else 0
+
+ # IMPORTANT: Use the EXACT original value for total_hashrate
+ # Do NOT recalculate it from worker data
+ total_hashrate = original_hashrate_3hr
+
+ # Daily sats from main dashboard
+ daily_sats = cached_metrics.get("daily_mined_sats", 0)
+
+ # Create hashrate history based on arrow_history if available
+ hashrate_history = []
+ if cached_metrics.get("arrow_history") and cached_metrics["arrow_history"].get("hashrate_3hr"):
+ hashrate_history = cached_metrics["arrow_history"]["hashrate_3hr"]
+
+ result = {
+ "workers": workers_data,
+ "workers_total": len(workers_data),
+ "workers_online": workers_online,
+ "workers_offline": workers_offline,
+ "total_hashrate": total_hashrate, # EXACT value from main dashboard
+ "hashrate_unit": hashrate_unit,
+ "total_earnings": total_earnings, # Now using unpaid_earnings
+ "daily_sats": daily_sats,
+ "avg_acceptance_rate": avg_acceptance_rate,
+ "hashrate_history": hashrate_history,
+ "timestamp": datetime.now(ZoneInfo("America/Los_Angeles")).isoformat()
+ }
+
+ # Update cache
+ self.worker_data_cache = result
+ self.last_worker_data_update = current_time
+
+ return result
+ except Exception as e:
+ logging.error(f"Error getting worker data: {e}")
+ return self.generate_default_workers_data()
+
+ def generate_workers_data(self, num_workers, total_hashrate, hashrate_unit, total_unpaid_earnings=None):
+ """
+ Generate simulated worker data based on total hashrate, ensuring total matches exactly.
+ Also distributes unpaid earnings proportionally when provided.
+
+ Args:
+ num_workers (int): Number of workers
+ total_hashrate (float): Total hashrate
+ hashrate_unit (str): Hashrate unit
+ total_unpaid_earnings (float, optional): Total unpaid earnings
+
+ Returns:
+ list: List of worker data dictionaries
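+
+ Example (illustrative; worker details are randomized on each call):
+ generate_workers_data(3, 120.0, "TH/s", total_unpaid_earnings=0.001)
+ returns 3 worker dicts whose online 3hr hashrates are scaled to
+ sum to ~120.0 TH/s and whose earnings sum to ~0.001 BTC.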
+ """
+ # Worker model types for simulation
+ models = [
+ {"type": "ASIC", "model": "Bitmain Antminer S19 Pro", "max_hashrate": 110, "power": 3250},
+ {"type": "ASIC", "model": "MicroBT Whatsminer M50S", "max_hashrate": 130, "power": 3276},
+ {"type": "ASIC", "model": "Bitmain Antminer S19j Pro", "max_hashrate": 104, "power": 3150},
+ {"type": "FPGA", "model": "BitAxe FPGA Miner", "max_hashrate": 3.2, "power": 35}
+ ]
+
+ # Worker names for simulation
+ prefixes = ["Antminer", "Whatsminer", "Miner", "Rig", "Node", "Worker", "BitAxe", "BTC"]
+
+ # Calculate hashrate distribution - majority of hashrate to online workers
+ online_count = max(1, int(num_workers * 0.8)) # At least 1 online worker
+ offline_count = num_workers - online_count
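+ # e.g. 5 workers -> int(5 * 0.8) = 4 online, 1 offline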
+
+ # Average hashrate per online worker
+ avg_hashrate = total_hashrate / online_count if online_count > 0 else 0
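+ # e.g. 120.0 TH/s across 4 online workers -> 30.0 TH/s average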
+
+ workers = []
+ current_time = datetime.now(ZoneInfo("America/Los_Angeles"))
+
+ # Default total unpaid earnings if not provided (nominal 0.001 BTC placeholder)
+ if total_unpaid_earnings is None or total_unpaid_earnings <= 0:
+ total_unpaid_earnings = 0.001
+
+ # Generate online workers
+ for i in range(online_count):
+ # Select a model: a random ASIC for most workers, an FPGA for the
+ # last worker when the average hashrate is small
+ if i < online_count - 1 or avg_hashrate > 5:
+ model_idx = random.randint(0, len(models) - 2) # Exclude FPGA
+ else:
+ model_idx = len(models) - 1 # FPGA for last worker if small hashrate
+
+ model_info = models[model_idx]
+
+ # Generate hashrate with random variation, capped at the model's rated maximum
+ base_hashrate = min(model_info["max_hashrate"], avg_hashrate * random.uniform(0.5, 1.5))
+ hashrate_60sec = round(base_hashrate * random.uniform(0.9, 1.1), 2)
+ hashrate_3hr = round(base_hashrate * random.uniform(0.85, 1.0), 2)
+
+ # Generate last share time (within last 5 minutes)
+ minutes_ago = random.randint(0, 5)
+ last_share = (current_time - timedelta(minutes=minutes_ago)).strftime("%Y-%m-%d %H:%M")
+
+ # Generate acceptance rate (95-100%)
+ acceptance_rate = round(random.uniform(95, 100), 1)
+
+ # Generate temperature (normal operating range)
+ temperature = random.randint(55, 70) if model_info["type"] == "ASIC" else random.randint(45, 55)
+
+ # Create a unique name
+ if model_info["type"] == "FPGA":
+ name = f"{prefixes[-1]}{random.randint(1, 99):02d}"
+ else:
+ name = f"{random.choice(prefixes[:-1])}{random.randint(1, 99):02d}"
+
+ workers.append({
+ "name": name,
+ "status": "online",
+ "type": model_info["type"],
+ "model": model_info["model"],
+ "hashrate_60sec": hashrate_60sec,
+ "hashrate_60sec_unit": hashrate_unit,
+ "hashrate_3hr": hashrate_3hr,
+ "hashrate_3hr_unit": hashrate_unit,
+ "efficiency": round(random.uniform(65, 95), 1),
+ "last_share": last_share,
+ "earnings": 0, # Will be set after all workers are generated
+ "acceptance_rate": acceptance_rate,
+ "power_consumption": model_info["power"],
+ "temperature": temperature
+ })
+
+ # Generate offline workers
+ for i in range(offline_count):
+ # Select a model - more likely to be FPGA for offline workers
+ if random.random() < 0.6: # 60% chance of FPGA
+ model_info = models[-1] # FPGA
+ else:
+ model_info = random.choice(models[:-1]) # ASIC
+
+ # Generate last share time (0.5 to 8 hours ago)
+ hours_ago = random.uniform(0.5, 8)
+ last_share = (current_time - timedelta(hours=hours_ago)).strftime("%Y-%m-%d %H:%M")
+
+ # Generate hashrate (historical before going offline)
+ if model_info["type"] == "FPGA":
+ hashrate_3hr = round(random.uniform(1, 3), 2)
+ else:
+ hashrate_3hr = round(random.uniform(20, 90), 2)
+
+ # Create a unique name
+ if model_info["type"] == "FPGA":
+ name = f"{prefixes[-1]}{random.randint(1, 99):02d}"
+ else:
+ name = f"{random.choice(prefixes[:-1])}{random.randint(1, 99):02d}"
+
+ workers.append({
+ "name": name,
+ "status": "offline",
+ "type": model_info["type"],
+ "model": model_info["model"],
+ "hashrate_60sec": 0,
+ "hashrate_60sec_unit": hashrate_unit,
+ "hashrate_3hr": hashrate_3hr,
+ "hashrate_3hr_unit": hashrate_unit,
+ "efficiency": 0,
+ "last_share": last_share,
+ "earnings": 0, # Minimal earnings for offline workers
+ "acceptance_rate": round(random.uniform(95, 99), 1),
+ "power_consumption": 0,
+ "temperature": 0
+ })
+
+ # --- NEW CODE FOR HASHRATE ALIGNMENT ---
+ # Calculate the current sum of online worker hashrates
+ current_total = sum(w["hashrate_3hr"] for w in workers if w["status"] == "online")
+
+ # If we have online workers and the total doesn't match, apply a scaling factor
+ if online_count > 0 and abs(current_total - total_hashrate) > 0.01:
+ scaling_factor = total_hashrate / current_total if current_total > 0 else 1
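+ # e.g. workers summing to 95 against a reported 100 gives factor 100/95 ~ 1.0526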
+
+ # Apply scaling to all online workers
+ for worker in workers:
+ if worker["status"] == "online":
+ # Scale the 3hr hashrate to exactly match total
+ worker["hashrate_3hr"] = round(worker["hashrate_3hr"] * scaling_factor, 2)
+
+ # Scale the 60sec hashrate proportionally
+ if worker["hashrate_60sec"] > 0:
+ worker["hashrate_60sec"] = round(worker["hashrate_60sec"] * scaling_factor, 2)
+
+ # --- NEW CODE TO DISTRIBUTE UNPAID EARNINGS PROPORTIONALLY ---
+ # First calculate the total effective hashrate (only from online workers)
+ total_effective_hashrate = sum(w["hashrate_3hr"] for w in workers if w["status"] == "online")
+
+ # Reserve a small portion (5%) of earnings for offline workers
+ online_earnings_pool = total_unpaid_earnings * 0.95
+ offline_earnings_pool = total_unpaid_earnings * 0.05
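+ # e.g. 0.001 BTC unpaid -> 0.00095 BTC online pool, 0.00005 BTC offline pool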
+
+ # Distribute earnings based on hashrate proportion for online workers
+ if total_effective_hashrate > 0:
+ for worker in workers:
+ if worker["status"] == "online":
+ hashrate_proportion = worker["hashrate_3hr"] / total_effective_hashrate
+ worker["earnings"] = round(online_earnings_pool * hashrate_proportion, 8)
+
+ # Distribute minimal earnings to offline workers
+ if offline_count > 0:
+ offline_per_worker = offline_earnings_pool / offline_count
+ for worker in workers:
+ if worker["status"] == "offline":
+ worker["earnings"] = round(offline_per_worker, 8)
+
+ # Final verification - ensure per-worker earnings sum to the total
+ current_total_earnings = sum(w["earnings"] for w in workers)
+ if abs(current_total_earnings - total_unpaid_earnings) > 1e-8: # off by more than one satoshi
+ # Fold any rounding error into the first online worker
+ adjustment = total_unpaid_earnings - current_total_earnings
+ for worker in workers:
+ if worker["status"] == "online":
+ worker["earnings"] = round(worker["earnings"] + adjustment, 8)
+ break
+
+ return workers