Enhance data handling and UI responsiveness

- Updated `save_graph_state` in `state_manager.py` to include unit preservation for `arrow_history` and `metrics_log`, with improved data size logging and exception handling.
- Modified `notifications.css` for better mobile responsiveness, including a grid layout for `.filter-buttons` and adjusted button sizes.
- Changed value display in `initializeChart` to use 'PH' for petahashes, aligning with new unit standards.
- Ensured consistent unit formatting in `updateChartWithNormalizedData` for the 24-hour average line.
- Enhanced precision in `updateUI` for unpaid earnings display.
- Simplified y-axis label in chart configuration and updated value formatting to reflect new unit standards.
This commit is contained in:
DJObleezy 2025-04-25 22:13:01 -07:00
parent 312031dcae
commit e0c5f085cc
3 changed files with 73 additions and 78 deletions

View File

@@ -125,87 +125,63 @@ class StateManager:
if not self.redis_client: if not self.redis_client:
logging.info("Redis not available, skipping state save.") logging.info("Redis not available, skipping state save.")
return return
# Check if we've saved recently to avoid too frequent saves
# Only save at most once every 5 minutes
current_time = time.time() current_time = time.time()
if hasattr(self, 'last_save_time') and current_time - self.last_save_time < 300: # 300 seconds = 5 minutes if hasattr(self, 'last_save_time') and current_time - self.last_save_time < 300: # 5 minutes
logging.debug("Skipping Redis save - last save was less than 5 minutes ago") logging.debug("Skipping Redis save - last save was less than 5 minutes ago")
return return
# Update the last save time
self.last_save_time = current_time self.last_save_time = current_time
# Prune data first to reduce volume
self.prune_old_data() self.prune_old_data()
# Create compact versions of the data structures for Redis storage
try: try:
# 1. Create compact arrow_history with minimal data # Compact arrow_history with unit preservation
compact_arrow_history = {} compact_arrow_history = {}
for key, values in arrow_history.items(): for key, values in arrow_history.items():
if isinstance(values, list) and values: if isinstance(values, list) and values:
# Only store recent history (last 2 hours)
recent_values = values[-180:] if len(values) > 180 else values recent_values = values[-180:] if len(values) > 180 else values
# Use shorter field names and preserve arrow directions
compact_arrow_history[key] = [ compact_arrow_history[key] = [
{"t": entry["time"], "v": entry["value"], "a": entry["arrow"]} {"t": entry["time"], "v": entry["value"], "a": entry["arrow"], "u": entry.get("unit", "th/s")}
for entry in recent_values for entry in recent_values
] ]
# 2. Only keep essential hashrate_history # Compact hashrate_history
compact_hashrate_history = hashrate_history[-60:] if len(hashrate_history) > 60 else hashrate_history compact_hashrate_history = hashrate_history[-60:] if len(hashrate_history) > 60 else hashrate_history
# 3. Only keep recent metrics_log entries (last 30 minutes) # Compact metrics_log with unit preservation
# This is typically the largest data structure
compact_metrics_log = [] compact_metrics_log = []
if metrics_log: if metrics_log:
# Keep only last 30 entries (30 minutes assuming 1-minute updates) recent_logs = metrics_log[-30:]
recent_logs = metrics_log[-30:]
for entry in recent_logs: for entry in recent_logs:
# Only keep necessary fields from each metrics entry metrics_copy = {}
if "metrics" in entry and "timestamp" in entry: original_metrics = entry["metrics"]
metrics_copy = {} essential_keys = [
original_metrics = entry["metrics"] "hashrate_60sec", "hashrate_24hr", "btc_price",
"workers_hashing", "unpaid_earnings", "difficulty",
# Only copy the most important metrics for historical tracking "network_hashrate", "daily_profit_usd"
essential_keys = [ ]
"hashrate_60sec", "hashrate_24hr", "btc_price", for key in essential_keys:
"workers_hashing", "unpaid_earnings", "difficulty", if key in original_metrics:
"network_hashrate", "daily_profit_usd" metrics_copy[key] = {
] "value": original_metrics[key],
"unit": original_metrics.get(f"{key}_unit", "th/s")
for key in essential_keys: }
if key in original_metrics: compact_metrics_log.append({
metrics_copy[key] = original_metrics[key] "ts": entry["timestamp"],
"m": metrics_copy
# Skip arrow_history within metrics as we already stored it separately })
compact_metrics_log.append({
"ts": entry["timestamp"],
"m": metrics_copy
})
# Create the final state object
state = { state = {
"arrow_history": compact_arrow_history, "arrow_history": compact_arrow_history,
"hashrate_history": compact_hashrate_history, "hashrate_history": compact_hashrate_history,
"metrics_log": compact_metrics_log "metrics_log": compact_metrics_log
} }
# Convert to JSON once to reuse and measure size
state_json = json.dumps(state) state_json = json.dumps(state)
data_size_kb = len(state_json) / 1024 data_size_kb = len(state_json) / 1024
# Log data size for monitoring
logging.info(f"Saving graph state to Redis: {data_size_kb:.2f} KB (optimized format)") logging.info(f"Saving graph state to Redis: {data_size_kb:.2f} KB (optimized format)")
# Only save if data size is reasonable (adjust threshold as needed) self.redis_client.set(f"{self.STATE_KEY}_version", "2.0")
if data_size_kb > 2000: # 2MB warning threshold (reduced from 5MB)
logging.warning(f"Redis save data size is still large: {data_size_kb:.2f} KB")
# Store version info to handle future format changes
self.redis_client.set(f"{self.STATE_KEY}_version", "2.0")
self.redis_client.set(self.STATE_KEY, state_json) self.redis_client.set(self.STATE_KEY, state_json)
logging.info(f"Successfully saved graph state to Redis ({data_size_kb:.2f} KB)") logging.info(f"Successfully saved graph state to Redis ({data_size_kb:.2f} KB)")
except Exception as e: except Exception as e:

View File

@@ -283,12 +283,13 @@
.notification-actions { .notification-actions {
flex-direction: column; flex-direction: column;
gap: 8px; gap: 8px;
margin-top: 10px;
} }
.action-button { .action-button {
width: 100%; /* Full width on small screens */ width: 100%; /* Full width on small screens */
padding: 8px 12px; padding: 8px 12px;
font-size: 1rem; font-size: 0.95rem;
} }
.notification-controls { .notification-controls {
@@ -297,16 +298,27 @@
} }
.filter-buttons { .filter-buttons {
overflow-x: auto; display: grid;
grid-template-columns: repeat(3, 1fr);
gap: 8px;
padding-bottom: 5px; padding-bottom: 5px;
margin-bottom: 5px; margin-bottom: 10px;
white-space: nowrap; width: 100%;
}
.filter-button {
text-align: center;
white-space: normal;
font-size: 0.9rem;
padding: 8px 5px;
display: flex; display: flex;
flex-wrap: nowrap; align-items: center;
justify-content: center;
min-height: 38px;
} }
.notification-actions { .notification-actions {
justify-content: flex-end; justify-content: stretch;
} }
.notification-item { .notification-item {
@@ -324,4 +336,11 @@
.notification-actions { .notification-actions {
flex: 0 0 60px; flex: 0 0 60px;
} }
/* For very small screens, reduce to 2 columns */
@media (max-width: 375px) {
.filter-buttons {
grid-template-columns: repeat(2, 1fr);
}
}
} }

View File

@@ -878,15 +878,11 @@ function initializeChart() {
// For zero, just return 0 // For zero, just return 0
if (value === 0) return '0'; if (value === 0) return '0';
// For very large values (1M+) // For large values (1000+ TH/s), show in PH/s
if (value >= 1000000) { if (value >= 1000) {
return (value / 1000000).toFixed(1) + 'M'; return (value / 1000).toFixed(1) + ' PH';
} }
// For large values (1K+) // For values between 10 and 1000 TH/s
else if (value >= 1000) {
return (value / 1000).toFixed(1) + 'K';
}
// For values between 10 and 1000
else if (value >= 10) { else if (value >= 10) {
return Math.round(value); return Math.round(value);
} }
@@ -1095,6 +1091,7 @@ function updateChartWithNormalizedData(chart, data) {
const avg24hrUnit = data.hashrate_24hr_unit ? data.hashrate_24hr_unit.toLowerCase() : 'th/s'; const avg24hrUnit = data.hashrate_24hr_unit ? data.hashrate_24hr_unit.toLowerCase() : 'th/s';
const normalizedAvg = normalizeHashrate(avg24hr, avg24hrUnit); const normalizedAvg = normalizeHashrate(avg24hr, avg24hrUnit);
// Update the 24HR AVG line using the existing formatHashrateForDisplay function
if (!isNaN(normalizedAvg) && if (!isNaN(normalizedAvg) &&
chart.options.plugins.annotation && chart.options.plugins.annotation &&
chart.options.plugins.annotation.annotations && chart.options.plugins.annotation.annotations &&
@@ -1102,7 +1099,10 @@ function updateChartWithNormalizedData(chart, data) {
const annotation = chart.options.plugins.annotation.annotations.averageLine; const annotation = chart.options.plugins.annotation.annotations.averageLine;
annotation.yMin = normalizedAvg; annotation.yMin = normalizedAvg;
annotation.yMax = normalizedAvg; annotation.yMax = normalizedAvg;
annotation.label.content = '24HR AVG: ' + normalizedAvg.toFixed(1) + ' TH/S';
// Use the formatting function already available to ensure consistent units
const formattedAvg = formatHashrateForDisplay(normalizedAvg);
annotation.label.content = '24HR AVG: ' + formattedAvg.toUpperCase();
} }
// Detect low hashrate devices (Bitaxe < 2 TH/s) // Detect low hashrate devices (Bitaxe < 2 TH/s)
@@ -1746,7 +1746,7 @@ function updateUI() {
// Update worker count from metrics (just the number, not full worker data) // Update worker count from metrics (just the number, not full worker data)
updateWorkersCount(); updateWorkersCount();
updateElementText("unpaid_earnings", data.unpaid_earnings + " BTC"); updateElementText("unpaid_earnings", data.unpaid_earnings.toFixed(8) + " BTC");
// Update payout estimation with color coding // Update payout estimation with color coding
const payoutText = data.est_time_to_payout; const payoutText = data.est_time_to_payout;
@@ -2013,7 +2013,7 @@ $(document).ready(function () {
y: { y: {
title: { title: {
display: true, display: true,
text: 'HASHRATE (TH/S)', text: 'HASHRATE',
color: theme.PRIMARY, color: theme.PRIMARY,
font: { font: {
family: "'VT323', monospace", family: "'VT323', monospace",
@@ -2035,15 +2035,15 @@ $(document).ready(function () {
// For zero, just return 0 // For zero, just return 0
if (value === 0) return '0'; if (value === 0) return '0';
// For very large values (1M+) // For very large values (1M+ TH/s = 1000+ PH/s)
if (value >= 1000000) { if (value >= 1000000) {
return (value / 1000000).toFixed(1) + 'M'; return (value / 1000000).toFixed(1) + 'E'; // Show as EH/s
} }
// For large values (1K+) // For large values (1000+ TH/s), show in PH/s
else if (value >= 1000) { else if (value >= 1000) {
return (value / 1000).toFixed(1) + 'K'; return (value / 1000).toFixed(1) + 'P'; // Show as PH/s
} }
// For values between 10 and 1000 // For values between 10 and 1000 TH/s
else if (value >= 10) { else if (value >= 10) {
return Math.round(value); return Math.round(value);
} }