diff --git a/node/rustchain_bft_consensus.py b/node/rustchain_bft_consensus.py index 7041d51e..177f09a2 100644 --- a/node/rustchain_bft_consensus.py +++ b/node/rustchain_bft_consensus.py @@ -1,954 +1,954 @@ -#!/usr/bin/env python3 -""" -RustChain BFT Consensus Module - RIP-0202 -Byzantine Fault Tolerant Consensus for Multi-Node Operation - -This module implements a simplified PBFT (Practical Byzantine Fault Tolerance) -consensus mechanism adapted for RustChain's Proof of Antiquity (PoA) model. - -Key Features: -- 3-phase consensus: PRE-PREPARE, PREPARE, COMMIT -- Tolerates f byzantine nodes where total = 3f + 1 -- Epoch-based consensus (one decision per epoch) -- View change for leader failure -- Integrated with PoA hardware attestation - -Author: RustChain Team -RIP: 0202 -Version: 1.0.0 -""" - -import hashlib -import hmac -import json -import logging -import sqlite3 -import threading -import time -from dataclasses import dataclass, asdict -from enum import Enum -from typing import Dict, List, Optional, Set, Tuple -import requests - -# Configure logging -logging.basicConfig(level=logging.INFO, format='%(asctime)s [BFT] %(message)s') - -# ============================================================================ -# CONSTANTS -# ============================================================================ - -BLOCK_TIME = 600 # 10 minutes per epoch -PREPARE_THRESHOLD = 2/3 # Need 2/3 of nodes to prepare -COMMIT_THRESHOLD = 2/3 # Need 2/3 of nodes to commit -VIEW_CHANGE_TIMEOUT = 90 # Seconds before triggering view change -CONSENSUS_MESSAGE_TTL = 300 # 5 minutes message validity - - -class ConsensusPhase(Enum): - IDLE = "idle" - PRE_PREPARE = "pre_prepare" - PREPARE = "prepare" - COMMIT = "commit" - COMMITTED = "committed" - VIEW_CHANGE = "view_change" - - -class MessageType(Enum): - PRE_PREPARE = "pre_prepare" - PREPARE = "prepare" - COMMIT = "commit" - VIEW_CHANGE = "view_change" - NEW_VIEW = "new_view" - CHECKPOINT = "checkpoint" - - -# 
============================================================================ -# DATA STRUCTURES -# ============================================================================ - -@dataclass -class ConsensusMessage: - """Message structure for BFT consensus""" - msg_type: str - view: int # Current view number - epoch: int # RustChain epoch - digest: str # Hash of proposal - node_id: str # Sender node ID - signature: str # HMAC signature - timestamp: int # Unix timestamp - proposal: Optional[Dict] = None # Actual data (only in PRE-PREPARE) - - def to_dict(self) -> Dict: - return asdict(self) - - @staticmethod - def from_dict(data: Dict) -> 'ConsensusMessage': - return ConsensusMessage(**data) - - def compute_digest(self) -> str: - """Compute digest of the proposal""" - if self.proposal: - return hashlib.sha256(json.dumps(self.proposal, sort_keys=True).encode()).hexdigest() - return self.digest - - -@dataclass -class EpochProposal: - """Proposal for epoch settlement""" - epoch: int - miners: List[Dict] # Miner attestations - total_reward: float # 1.5 RTC per epoch - distribution: Dict[str, float] # miner_id -> reward - proposer: str # Node that created proposal - merkle_root: str # Merkle root of miner data - - def compute_digest(self) -> str: - data = { - 'epoch': self.epoch, - 'miners': self.miners, - 'total_reward': self.total_reward, - 'distribution': self.distribution, - 'proposer': self.proposer, - 'merkle_root': self.merkle_root - } - return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest() - - -@dataclass -class ViewChangeMessage: - """View change request""" - view: int - epoch: int - node_id: str - prepared_cert: Optional[Dict] # Proof of prepared state - signature: str - - -# ============================================================================ -# BFT CONSENSUS ENGINE -# ============================================================================ - -class BFTConsensus: - """ - Practical Byzantine Fault Tolerance (PBFT) consensus 
engine for RustChain. - - Adapted for Proof of Antiquity: - - No block proposer election (round-robin based on view) - - Consensus on epoch settlements (miner rewards) - - Hardware attestation validation before accepting proposals - """ - - def __init__(self, node_id: str, db_path: str, secret_key: str): - self.node_id = node_id - self.db_path = db_path - self.secret_key = secret_key - - # State - self.current_view = 0 - self.current_epoch = 0 - self.phase = ConsensusPhase.IDLE - - # Message logs - self.pre_prepare_log: Dict[int, ConsensusMessage] = {} # epoch -> message - self.prepare_log: Dict[int, Dict[str, ConsensusMessage]] = {} # epoch -> {node_id: msg} - self.commit_log: Dict[int, Dict[str, ConsensusMessage]] = {} # epoch -> {node_id: msg} - self.view_change_log: Dict[int, Dict[str, ViewChangeMessage]] = {} # view -> {node_id: msg} - - # Committed epochs - self.committed_epochs: Set[int] = set() - - # Peer nodes - self.peers: Dict[str, str] = {} # node_id -> url - - # Thread synchronization - self.lock = threading.RLock() - - # Timer for view change - self.view_change_timer: Optional[threading.Timer] = None - - # Initialize database - self._init_db() - - def _init_db(self): - """Initialize BFT consensus tables""" - with sqlite3.connect(self.db_path) as conn: - # Consensus log table - conn.execute(""" - CREATE TABLE IF NOT EXISTS bft_consensus_log ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - epoch INTEGER NOT NULL, - view INTEGER NOT NULL, - msg_type TEXT NOT NULL, - node_id TEXT NOT NULL, - digest TEXT NOT NULL, - proposal_json TEXT, - signature TEXT NOT NULL, - timestamp INTEGER NOT NULL, - UNIQUE(epoch, msg_type, node_id) - ) - """) - - # Committed epochs table - conn.execute(""" - CREATE TABLE IF NOT EXISTS bft_committed_epochs ( - epoch INTEGER PRIMARY KEY, - view INTEGER NOT NULL, - digest TEXT NOT NULL, - committed_at INTEGER NOT NULL, - proposal_json TEXT NOT NULL - ) - """) - - # View change log - conn.execute(""" - CREATE TABLE IF NOT EXISTS 
bft_view_changes ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - view INTEGER NOT NULL, - node_id TEXT NOT NULL, - timestamp INTEGER NOT NULL, - UNIQUE(view, node_id) - ) - """) - - conn.commit() - - logging.info(f"BFT consensus initialized for node {self.node_id}") - - def register_peer(self, node_id: str, url: str): - """Register a peer node""" - with self.lock: - self.peers[node_id] = url - logging.info(f"Registered peer: {node_id} at {url}") - - def get_total_nodes(self) -> int: - """Get total number of nodes including self""" - return len(self.peers) + 1 - - def get_fault_tolerance(self) -> int: - """Calculate f (max faulty nodes we can tolerate)""" - # BFT requires n >= 3f + 1, so we can tolerate f = floor((n-1)/3) faulty nodes. - # E.g., 4 nodes → f=1: one Byzantine node cannot forge a 2/3 quorum. - n = self.get_total_nodes() - return (n - 1) // 3 - - def get_quorum_size(self) -> int: - """Get quorum size for consensus""" - # Quorum = 2f + 1 = ceil(2n/3). Using integer arithmetic (2n+2)//3 avoids - # floating point and always rounds up, ensuring we exceed the 2/3 threshold. - n = self.get_total_nodes() - return (2 * n + 2) // 3 - - def is_leader(self, view: int = None) -> bool: - """Check if this node is the leader for current view""" - if view is None: - view = self.current_view - - # Deterministic round-robin: sorting by node_id ensures all nodes agree on - # the leader ordering without a separate election or coordinator. 
- nodes = sorted([self.node_id] + list(self.peers.keys())) - leader_idx = view % len(nodes) - return nodes[leader_idx] == self.node_id - - def get_leader(self, view: int = None) -> str: - """Get the leader node ID for a view""" - if view is None: - view = self.current_view - - nodes = sorted([self.node_id] + list(self.peers.keys())) - leader_idx = view % len(nodes) - return nodes[leader_idx] - - def _sign_message(self, data: str) -> str: - """Sign a message with HMAC""" - return hmac.new( - self.secret_key.encode(), - data.encode(), - hashlib.sha256 - ).hexdigest() - - def _verify_signature(self, node_id: str, data: str, signature: str) -> bool: - """Verify message signature (simplified - all nodes share key in testnet)""" - # In production, each node would have its own keypair (ed25519 or similar). - # Shared HMAC is acceptable in a trusted-operator testnet but means one - # compromised node can forge messages from any peer. - # hmac.compare_digest prevents timing side-channel leaks. - expected = hmac.new( - self.secret_key.encode(), - data.encode(), - hashlib.sha256 - ).hexdigest() - return hmac.compare_digest(signature, expected) - - # ======================================================================== - # PHASE 1: PRE-PREPARE (Leader proposes) - # ======================================================================== - - def propose_epoch_settlement(self, epoch: int, miners: List[Dict], - distribution: Dict[str, float]) -> Optional[ConsensusMessage]: - """ - Leader proposes epoch settlement (PRE-PREPARE phase). - Only the leader for current view can call this. 
- """ - with self.lock: - if not self.is_leader(): - logging.warning(f"Node {self.node_id} is not leader for view {self.current_view}") - return None - - if epoch in self.committed_epochs: - logging.info(f"Epoch {epoch} already committed") - return None - - # Create proposal - proposal = EpochProposal( - epoch=epoch, - miners=miners, - total_reward=1.5, # RTC per epoch - distribution=distribution, - proposer=self.node_id, - merkle_root=self._compute_merkle_root(miners) - ) - - digest = proposal.compute_digest() - timestamp = int(time.time()) - - # Sign the message - sign_data = f"{MessageType.PRE_PREPARE.value}:{self.current_view}:{epoch}:{digest}:{timestamp}" - signature = self._sign_message(sign_data) - - # Create PRE-PREPARE message - msg = ConsensusMessage( - msg_type=MessageType.PRE_PREPARE.value, - view=self.current_view, - epoch=epoch, - digest=digest, - node_id=self.node_id, - signature=signature, - timestamp=timestamp, - proposal=asdict(proposal) - ) - - # Log locally - self.pre_prepare_log[epoch] = msg - self.phase = ConsensusPhase.PRE_PREPARE - self._save_message_to_db(msg) - - # Start view change timer - self._start_view_change_timer() - - # Broadcast to peers - self._broadcast_message(msg) - - logging.info(f"[PRE-PREPARE] Leader proposed epoch {epoch} settlement") - - # Leader also prepares - self._handle_pre_prepare(msg) - - return msg - - def _compute_merkle_root(self, miners: List[Dict]) -> str: - """Compute merkle root of miner attestations""" - if not miners: - return hashlib.sha256(b"empty").hexdigest() - - # Simple merkle: hash all miner data - hashes = [ - hashlib.sha256(json.dumps(m, sort_keys=True).encode()).hexdigest() - for m in miners - ] - - while len(hashes) > 1: - # Duplicate the last leaf when the count is odd so we always pair evenly. - # This is the standard Bitcoin-style merkle padding strategy. 
- if len(hashes) % 2 == 1: - hashes.append(hashes[-1]) - new_hashes = [] - for i in range(0, len(hashes), 2): - combined = hashes[i] + hashes[i + 1] - new_hashes.append(hashlib.sha256(combined.encode()).hexdigest()) - hashes = new_hashes - - return hashes[0] - - # ======================================================================== - # PHASE 2: PREPARE (Nodes validate and prepare) - # ======================================================================== - - def _handle_pre_prepare(self, msg: ConsensusMessage) -> Optional[ConsensusMessage]: - """Handle received PRE-PREPARE message""" - with self.lock: - epoch = msg.epoch - - # Validate message - if msg.view != self.current_view: - logging.warning(f"PRE-PREPARE for wrong view: {msg.view} != {self.current_view}") - return None - - if epoch in self.committed_epochs: - logging.info(f"Epoch {epoch} already committed") - return None - - # Verify it's from the leader - if msg.node_id != self.get_leader(msg.view): - logging.warning(f"PRE-PREPARE not from leader: {msg.node_id}") - return None - - # Validate proposal (hardware attestation checks) - if not self._validate_proposal(msg.proposal): - logging.warning(f"Invalid proposal for epoch {epoch}") - return None - - # Store PRE-PREPARE - if epoch not in self.pre_prepare_log: - self.pre_prepare_log[epoch] = msg - self._save_message_to_db(msg) - - # Send PREPARE message - timestamp = int(time.time()) - sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{timestamp}" - signature = self._sign_message(sign_data) - - prepare_msg = ConsensusMessage( - msg_type=MessageType.PREPARE.value, - view=msg.view, - epoch=epoch, - digest=msg.digest, - node_id=self.node_id, - signature=signature, - timestamp=timestamp - ) - - # Log prepare - if epoch not in self.prepare_log: - self.prepare_log[epoch] = {} - self.prepare_log[epoch][self.node_id] = prepare_msg - self._save_message_to_db(prepare_msg) - - self.phase = ConsensusPhase.PREPARE - - # Broadcast PREPARE - 
self._broadcast_message(prepare_msg) - - logging.info(f"[PREPARE] Node {self.node_id} prepared epoch {epoch}") - - # Check if we have quorum to commit - self._check_prepare_quorum(epoch) - - return prepare_msg - - def handle_prepare(self, msg: ConsensusMessage): - """Handle received PREPARE message from peer""" - with self.lock: - epoch = msg.epoch - - # Validate - if msg.view != self.current_view: - return - - if epoch in self.committed_epochs: - return - - # Verify signature - sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}" - if not self._verify_signature(msg.node_id, sign_data, msg.signature): - logging.warning(f"Invalid PREPARE signature from {msg.node_id}") - return - - # Store prepare - if epoch not in self.prepare_log: - self.prepare_log[epoch] = {} - - if msg.node_id not in self.prepare_log[epoch]: - self.prepare_log[epoch][msg.node_id] = msg - self._save_message_to_db(msg) - logging.info(f"[PREPARE] Received from {msg.node_id} for epoch {epoch}") - - # Check quorum - self._check_prepare_quorum(epoch) - - def _check_prepare_quorum(self, epoch: int): - """Check if we have quorum of PREPARE messages""" - if epoch not in self.prepare_log: - return - - prepare_count = len(self.prepare_log[epoch]) - quorum = self.get_quorum_size() - - logging.info(f"[PREPARE] Epoch {epoch}: {prepare_count}/{quorum} prepares") - - # Phase guard prevents sending duplicate COMMITs if more PREPAREs arrive - # after we already advanced — only transition once per epoch. 
- if prepare_count >= quorum and self.phase == ConsensusPhase.PREPARE: - # Transition to COMMIT phase - self._send_commit(epoch) - - # ======================================================================== - # PHASE 3: COMMIT (Finalize consensus) - # ======================================================================== - - def _send_commit(self, epoch: int): - """Send COMMIT message after receiving quorum of PREPAREs""" - with self.lock: - if epoch not in self.pre_prepare_log: - return - - pre_prepare = self.pre_prepare_log[epoch] - timestamp = int(time.time()) - - sign_data = f"{MessageType.COMMIT.value}:{pre_prepare.view}:{epoch}:{pre_prepare.digest}:{timestamp}" - signature = self._sign_message(sign_data) - - commit_msg = ConsensusMessage( - msg_type=MessageType.COMMIT.value, - view=pre_prepare.view, - epoch=epoch, - digest=pre_prepare.digest, - node_id=self.node_id, - signature=signature, - timestamp=timestamp - ) - - # Log commit - if epoch not in self.commit_log: - self.commit_log[epoch] = {} - self.commit_log[epoch][self.node_id] = commit_msg - self._save_message_to_db(commit_msg) - - self.phase = ConsensusPhase.COMMIT - - # Broadcast COMMIT - self._broadcast_message(commit_msg) - - logging.info(f"[COMMIT] Node {self.node_id} committed epoch {epoch}") - - # Check commit quorum - self._check_commit_quorum(epoch) - - def handle_commit(self, msg: ConsensusMessage): - """Handle received COMMIT message""" - with self.lock: - epoch = msg.epoch - - if epoch in self.committed_epochs: - return - - # Verify signature - sign_data = f"{MessageType.COMMIT.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}" - if not self._verify_signature(msg.node_id, sign_data, msg.signature): - logging.warning(f"Invalid COMMIT signature from {msg.node_id}") - return - - # Store commit - if epoch not in self.commit_log: - self.commit_log[epoch] = {} - - if msg.node_id not in self.commit_log[epoch]: - self.commit_log[epoch][msg.node_id] = msg - self._save_message_to_db(msg) - 
logging.info(f"[COMMIT] Received from {msg.node_id} for epoch {epoch}") - - # Check quorum - self._check_commit_quorum(epoch) - - def _check_commit_quorum(self, epoch: int): - """Check if we have quorum of COMMIT messages""" - if epoch not in self.commit_log: - return - - commit_count = len(self.commit_log[epoch]) - quorum = self.get_quorum_size() - - logging.info(f"[COMMIT] Epoch {epoch}: {commit_count}/{quorum} commits") - - if commit_count >= quorum and epoch not in self.committed_epochs: - # CONSENSUS REACHED! - self._finalize_epoch(epoch) - - def _finalize_epoch(self, epoch: int): - """Finalize epoch after consensus reached""" - with self.lock: - if epoch in self.committed_epochs: - return - - self.committed_epochs.add(epoch) - self.phase = ConsensusPhase.COMMITTED - - # Cancel view change timer - self._cancel_view_change_timer() - - # Get the proposal - pre_prepare = self.pre_prepare_log.get(epoch) - if not pre_prepare or not pre_prepare.proposal: - logging.error(f"No proposal found for committed epoch {epoch}") - return - - # Save to committed epochs table - with sqlite3.connect(self.db_path) as conn: - conn.execute(""" - INSERT OR REPLACE INTO bft_committed_epochs - (epoch, view, digest, committed_at, proposal_json) - VALUES (?, ?, ?, ?, ?) 
- """, (epoch, self.current_view, pre_prepare.digest, - int(time.time()), json.dumps(pre_prepare.proposal))) - conn.commit() - - logging.info(f"CONSENSUS REACHED for epoch {epoch}") - logging.info(f" Digest: {pre_prepare.digest[:16]}...") - logging.info(f" Proposer: {pre_prepare.proposal.get('proposer')}") - - # Apply the settlement (distribute rewards) - self._apply_settlement(pre_prepare.proposal) - - def _apply_settlement(self, proposal: Dict): - """Apply the consensus settlement to database""" - epoch = proposal.get('epoch') - distribution = proposal.get('distribution', {}) - - with sqlite3.connect(self.db_path) as conn: - for miner_id, reward in distribution.items(): - # Update balance - conn.execute(""" - INSERT INTO balances (miner_id, amount_i64) - VALUES (?, ?) - ON CONFLICT(miner_id) DO UPDATE SET - amount_i64 = amount_i64 + excluded.amount_i64 - # Store as integer micro-RTC (1 RTC = 1,000,000 uRTC) to avoid - # floating-point drift accumulating across many ledger entries. - """, (miner_id, int(reward * 1_000_000))) - - # Log in ledger - conn.execute(""" - INSERT INTO ledger (miner_id, delta_i64, tx_type, memo, ts) - VALUES (?, ?, 'reward', ?, ?) 
- """, (miner_id, int(reward * 1_000_000), f"epoch_{epoch}_bft", int(time.time()))) - - conn.commit() - - logging.info(f"Applied settlement for epoch {epoch}: {len(distribution)} miners rewarded") - - # ======================================================================== - # VIEW CHANGE (Leader failure handling) - # ======================================================================== - - def _start_view_change_timer(self): - """Start timer for view change if consensus not reached""" - self._cancel_view_change_timer() - - self.view_change_timer = threading.Timer(VIEW_CHANGE_TIMEOUT, self._trigger_view_change) - self.view_change_timer.daemon = True - self.view_change_timer.start() - - def _cancel_view_change_timer(self): - """Cancel view change timer""" - if self.view_change_timer: - self.view_change_timer.cancel() - self.view_change_timer = None - - def _trigger_view_change(self): - """Trigger view change due to timeout""" - with self.lock: - logging.warning(f"[VIEW-CHANGE] Timeout! 
Requesting view {self.current_view + 1}") - self.phase = ConsensusPhase.VIEW_CHANGE - - new_view = self.current_view + 1 - timestamp = int(time.time()) - - sign_data = f"{MessageType.VIEW_CHANGE.value}:{new_view}:{self.current_epoch}:{timestamp}" - signature = self._sign_message(sign_data) - - vc_msg = ViewChangeMessage( - view=new_view, - epoch=self.current_epoch, - node_id=self.node_id, - prepared_cert=None, # Could include prepared certificate - signature=signature - ) - - # Log view change - if new_view not in self.view_change_log: - self.view_change_log[new_view] = {} - self.view_change_log[new_view][self.node_id] = vc_msg - - # Broadcast view change - self._broadcast_view_change(vc_msg) - - # Check if we have quorum for view change - self._check_view_change_quorum(new_view) - - def handle_view_change(self, msg_data: Dict): - """Handle received VIEW-CHANGE message""" - with self.lock: - new_view = msg_data.get('view') - node_id = msg_data.get('node_id') - - if new_view not in self.view_change_log: - self.view_change_log[new_view] = {} - - if node_id not in self.view_change_log[new_view]: - self.view_change_log[new_view][node_id] = ViewChangeMessage(**msg_data) - logging.info(f"[VIEW-CHANGE] Received from {node_id} for view {new_view}") - - self._check_view_change_quorum(new_view) - - def _check_view_change_quorum(self, new_view: int): - """Check if we have quorum for view change""" - if new_view not in self.view_change_log: - return - - vc_count = len(self.view_change_log[new_view]) - quorum = self.get_quorum_size() - - logging.info(f"[VIEW-CHANGE] View {new_view}: {vc_count}/{quorum} votes") - - if vc_count >= quorum: - self._perform_view_change(new_view) - - def _perform_view_change(self, new_view: int): - """Perform view change""" - with self.lock: - if new_view <= self.current_view: - return - - self.current_view = new_view - self.phase = ConsensusPhase.IDLE - - logging.info(f"[NEW-VIEW] Changed to view {new_view}, leader: {self.get_leader()}") - - # If 
we're the new leader, propose - if self.is_leader(): - logging.info(f"[NEW-VIEW] We are the new leader!") - # New leader should re-propose pending epochs - - # ======================================================================== - # VALIDATION - # ======================================================================== - - def _validate_proposal(self, proposal: Dict) -> bool: - """Validate an epoch settlement proposal""" - if not proposal: - return False - - epoch = proposal.get('epoch') - miners = proposal.get('miners', []) - distribution = proposal.get('distribution', {}) - - # Check epoch is valid - if epoch is None or epoch < 0: - return False - - # Use absolute tolerance rather than ==, since floating-point arithmetic - # on reward fractions can produce values like 1.4999999999 or 1.5000000001. - total = sum(distribution.values()) - if abs(total - 1.5) > 0.001: - logging.warning(f"Invalid total reward: {total} != 1.5") - return False - - # Check all miners in distribution are in miner list - miner_ids = {m.get('miner_id') for m in miners} - for miner_id in distribution: - if miner_id not in miner_ids: - logging.warning(f"Miner {miner_id} in distribution but not in miners list") - return False - - # Verify merkle_root matches the submitted miners list. - # Without this check a Byzantine leader can recycle a valid merkle_root - # from a previous epoch while submitting a different (falsified) miners - # list, and honest nodes would still send PREPARE for the forged proposal. - expected_merkle = self._compute_merkle_root(miners) - if proposal.get('merkle_root') != expected_merkle: - logging.warning( - f"Proposal merkle_root mismatch for epoch {epoch}: " - f"got {proposal.get('merkle_root', '')[:16]}... " - f"expected {expected_merkle[:16]}..." 
- ) - return False - - return True - - # ======================================================================== - # NETWORK - # ======================================================================== - - def _broadcast_message(self, msg: ConsensusMessage): - """Broadcast message to all peers""" - for node_id, url in self.peers.items(): - try: - endpoint = f"{url}/bft/message" - response = requests.post( - endpoint, - json=msg.to_dict(), - timeout=5, - headers={'X-Node-ID': self.node_id} - ) - if response.ok: - logging.debug(f"Broadcast {msg.msg_type} to {node_id}") - except Exception as e: - logging.error(f"Failed to broadcast to {node_id}: {e}") - - def _broadcast_view_change(self, msg: ViewChangeMessage): - """Broadcast view change message""" - msg_data = asdict(msg) - for node_id, url in self.peers.items(): - try: - endpoint = f"{url}/bft/view_change" - response = requests.post(endpoint, json=msg_data, timeout=5) - if response.ok: - logging.debug(f"Broadcast VIEW-CHANGE to {node_id}") - except Exception as e: - logging.error(f"Failed to broadcast VIEW-CHANGE to {node_id}: {e}") - - def _save_message_to_db(self, msg: ConsensusMessage): - """Save consensus message to database""" - try: - with sqlite3.connect(self.db_path) as conn: - conn.execute(""" - INSERT OR REPLACE INTO bft_consensus_log - (epoch, view, msg_type, node_id, digest, proposal_json, signature, timestamp) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
- """, ( - msg.epoch, msg.view, msg.msg_type, msg.node_id, - msg.digest, json.dumps(msg.proposal) if msg.proposal else None, - msg.signature, msg.timestamp - )) - conn.commit() - except Exception as e: - logging.error(f"Failed to save message: {e}") - - def receive_message(self, msg_data: Dict): - """Handle incoming consensus message""" - msg_type = msg_data.get('msg_type') - - if msg_type == MessageType.PRE_PREPARE.value: - msg = ConsensusMessage.from_dict(msg_data) - self._handle_pre_prepare(msg) - elif msg_type == MessageType.PREPARE.value: - msg = ConsensusMessage.from_dict(msg_data) - self.handle_prepare(msg) - elif msg_type == MessageType.COMMIT.value: - msg = ConsensusMessage.from_dict(msg_data) - self.handle_commit(msg) - - # ======================================================================== - # STATUS - # ======================================================================== - - def get_status(self) -> Dict: - """Get consensus status""" - with self.lock: - return { - 'node_id': self.node_id, - 'current_view': self.current_view, - 'current_epoch': self.current_epoch, - 'phase': self.phase.value, - 'leader': self.get_leader(), - 'is_leader': self.is_leader(), - 'total_nodes': self.get_total_nodes(), - 'fault_tolerance': self.get_fault_tolerance(), - 'quorum_size': self.get_quorum_size(), - 'committed_epochs': len(self.committed_epochs), - 'peers': list(self.peers.keys()) - } - - -# ============================================================================ -# FLASK ROUTES FOR BFT -# ============================================================================ - -def create_bft_routes(app, bft: BFTConsensus): - """Add BFT consensus routes to Flask app""" - from flask import request, jsonify - - @app.route('/bft/status', methods=['GET']) - def bft_status(): - """Get BFT consensus status""" - return jsonify(bft.get_status()) - - @app.route('/bft/message', methods=['POST']) - def bft_receive_message(): - """Receive consensus message from peer""" - try: - 
msg_data = request.get_json() - bft.receive_message(msg_data) - return jsonify({'status': 'ok'}) - except Exception as e: - logging.error(f"BFT message error: {e}") - return jsonify({'error': str(e)}), 400 - - @app.route('/bft/view_change', methods=['POST']) - def bft_view_change(): - """Receive view change message""" - try: - msg_data = request.get_json() - bft.handle_view_change(msg_data) - return jsonify({'status': 'ok'}) - except Exception as e: - logging.error(f"BFT view change error: {e}") - return jsonify({'error': str(e)}), 400 - - @app.route('/bft/propose', methods=['POST']) - def bft_propose(): - """Manually trigger epoch proposal (admin)""" - try: - data = request.get_json() - epoch = data.get('epoch') - miners = data.get('miners', []) - distribution = data.get('distribution', {}) - - msg = bft.propose_epoch_settlement(epoch, miners, distribution) - if msg: - return jsonify({'status': 'proposed', 'digest': msg.digest}) - else: - return jsonify({'error': 'not_leader_or_already_committed'}), 400 - except Exception as e: - logging.error(f"BFT propose error: {e}") - return jsonify({'error': str(e)}), 500 - - -# ============================================================================ -# MAIN (Testing) -# ============================================================================ - -if __name__ == "__main__": - import sys - - print("=" * 60) - print("RustChain BFT Consensus Module - RIP-0202") - print("=" * 60) - - # Test with mock data - node_id = sys.argv[1] if len(sys.argv) > 1 else "node-131" - db_path = "/tmp/bft_test.db" - secret_key = "rustchain_bft_testnet_key_2025" - - bft = BFTConsensus(node_id, db_path, secret_key) - - # Register peer - bft.register_peer("node-153", "http://50.28.86.153:8099") - - print(f"\nNode: {node_id}") - print(f"Is Leader: {bft.is_leader()}") - print(f"Current View: {bft.current_view}") - print(f"Total Nodes: {bft.get_total_nodes()}") - print(f"Quorum Size: {bft.get_quorum_size()}") - print(f"Fault Tolerance: 
#!/usr/bin/env python3
"""
RustChain BFT Consensus Module - RIP-0202
Byzantine Fault Tolerant Consensus for Multi-Node Operation

This module implements a simplified PBFT (Practical Byzantine Fault Tolerance)
consensus mechanism adapted for RustChain's Proof of Antiquity (PoA) model.

Key Features:
- 3-phase consensus: PRE-PREPARE, PREPARE, COMMIT
- Tolerates f byzantine nodes where total = 3f + 1
- Epoch-based consensus (one decision per epoch)
- View change for leader failure
- Integrated with PoA hardware attestation

Author: RustChain Team
RIP: 0202
Version: 1.0.0
"""

import hashlib
import hmac
import json
import logging
import sqlite3
import threading
import time
from dataclasses import dataclass, asdict
from enum import Enum
from typing import Dict, List, Optional, Set, Tuple

# NOTE: `requests` is imported lazily inside the broadcast helpers below so the
# consensus state machine has no hard dependency on the HTTP stack (and can be
# unit-tested without it installed).

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s [BFT] %(message)s')

# ============================================================================
# CONSTANTS
# ============================================================================

BLOCK_TIME = 600                # 10 minutes per epoch
PREPARE_THRESHOLD = 2/3         # Need 2/3 of nodes to prepare
COMMIT_THRESHOLD = 2/3          # Need 2/3 of nodes to commit
VIEW_CHANGE_TIMEOUT = 90        # Seconds before triggering view change
CONSENSUS_MESSAGE_TTL = 300     # 5 minutes message validity


class ConsensusPhase(Enum):
    """Phases of the PBFT state machine for the current epoch."""
    IDLE = "idle"
    PRE_PREPARE = "pre_prepare"
    PREPARE = "prepare"
    COMMIT = "commit"
    COMMITTED = "committed"
    VIEW_CHANGE = "view_change"


class MessageType(Enum):
    """Wire-level consensus message types."""
    PRE_PREPARE = "pre_prepare"
    PREPARE = "prepare"
    COMMIT = "commit"
    VIEW_CHANGE = "view_change"
    NEW_VIEW = "new_view"
    CHECKPOINT = "checkpoint"


# ============================================================================
# DATA STRUCTURES
# ============================================================================

@dataclass
class ConsensusMessage:
    """Message structure for BFT consensus."""
    msg_type: str                    # One of MessageType values
    view: int                        # Current view number
    epoch: int                       # RustChain epoch
    digest: str                      # Hash of proposal
    node_id: str                     # Sender node ID
    signature: str                   # HMAC signature
    timestamp: int                   # Unix timestamp
    proposal: Optional[Dict] = None  # Actual data (only in PRE-PREPARE)

    def to_dict(self) -> Dict:
        return asdict(self)

    @staticmethod
    def from_dict(data: Dict) -> 'ConsensusMessage':
        return ConsensusMessage(**data)

    def compute_digest(self) -> str:
        """Compute the canonical digest of the attached proposal.

        Falls back to the carried digest for messages (PREPARE/COMMIT) that
        do not carry the proposal body.
        """
        if self.proposal:
            return hashlib.sha256(json.dumps(self.proposal, sort_keys=True).encode()).hexdigest()
        return self.digest


@dataclass
class EpochProposal:
    """Proposal for epoch settlement."""
    epoch: int
    miners: List[Dict]              # Miner attestations
    total_reward: float             # 1.5 RTC per epoch
    distribution: Dict[str, float]  # miner_id -> reward
    proposer: str                   # Node that created proposal
    merkle_root: str                # Merkle root of miner data

    def compute_digest(self) -> str:
        """Digest over all fields; sort_keys makes the JSON canonical so every
        node computes the same hash for the same proposal."""
        data = {
            'epoch': self.epoch,
            'miners': self.miners,
            'total_reward': self.total_reward,
            'distribution': self.distribution,
            'proposer': self.proposer,
            'merkle_root': self.merkle_root
        }
        return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest()


@dataclass
class ViewChangeMessage:
    """View change request.

    NOTE(review): the signature is computed over a timestamp that is NOT part
    of this message, so receivers cannot re-derive the signed data to verify
    it — confirm whether the wire format should carry the timestamp.
    """
    view: int
    epoch: int
    node_id: str
    prepared_cert: Optional[Dict]  # Proof of prepared state
    signature: str


# ============================================================================
# BFT CONSENSUS ENGINE
# ============================================================================

class BFTConsensus:
    """
    Practical Byzantine Fault Tolerance (PBFT) consensus engine for RustChain.

    Adapted for Proof of Antiquity:
    - No block proposer election (round-robin based on view)
    - Consensus on epoch settlements (miner rewards)
    - Hardware attestation validation before accepting proposals
    """

    def __init__(self, node_id: str, db_path: str, secret_key: str):
        self.node_id = node_id
        self.db_path = db_path
        self.secret_key = secret_key

        # State
        self.current_view = 0
        self.current_epoch = 0
        self.phase = ConsensusPhase.IDLE

        # Message logs
        self.pre_prepare_log: Dict[int, ConsensusMessage] = {}              # epoch -> message
        self.prepare_log: Dict[int, Dict[str, ConsensusMessage]] = {}       # epoch -> {node_id: msg}
        self.commit_log: Dict[int, Dict[str, ConsensusMessage]] = {}        # epoch -> {node_id: msg}
        self.view_change_log: Dict[int, Dict[str, ViewChangeMessage]] = {}  # view -> {node_id: msg}

        # Committed epochs
        self.committed_epochs: Set[int] = set()

        # Peer nodes
        self.peers: Dict[str, str] = {}  # node_id -> url

        # RLock: the phases call into each other (pre-prepare -> prepare ->
        # commit -> finalize) while already holding the lock.
        self.lock = threading.RLock()

        # Timer for view change
        self.view_change_timer: Optional[threading.Timer] = None

        # Initialize database
        self._init_db()

    def _init_db(self):
        """Initialize BFT consensus tables (idempotent)."""
        with sqlite3.connect(self.db_path) as conn:
            # Consensus log table
            conn.execute("""
                CREATE TABLE IF NOT EXISTS bft_consensus_log (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    epoch INTEGER NOT NULL,
                    view INTEGER NOT NULL,
                    msg_type TEXT NOT NULL,
                    node_id TEXT NOT NULL,
                    digest TEXT NOT NULL,
                    proposal_json TEXT,
                    signature TEXT NOT NULL,
                    timestamp INTEGER NOT NULL,
                    UNIQUE(epoch, msg_type, node_id)
                )
            """)

            # Committed epochs table
            conn.execute("""
                CREATE TABLE IF NOT EXISTS bft_committed_epochs (
                    epoch INTEGER PRIMARY KEY,
                    view INTEGER NOT NULL,
                    digest TEXT NOT NULL,
                    committed_at INTEGER NOT NULL,
                    proposal_json TEXT NOT NULL
                )
            """)

            # View change log
            conn.execute("""
                CREATE TABLE IF NOT EXISTS bft_view_changes (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    view INTEGER NOT NULL,
                    node_id TEXT NOT NULL,
                    timestamp INTEGER NOT NULL,
                    UNIQUE(view, node_id)
                )
            """)

            conn.commit()

        logging.info(f"BFT consensus initialized for node {self.node_id}")

    def register_peer(self, node_id: str, url: str):
        """Register a peer node."""
        with self.lock:
            self.peers[node_id] = url
            logging.info(f"Registered peer: {node_id} at {url}")

    def get_total_nodes(self) -> int:
        """Get total number of nodes including self."""
        return len(self.peers) + 1

    def get_fault_tolerance(self) -> int:
        """Calculate f (max faulty nodes we can tolerate)."""
        # BFT requires n >= 3f + 1, so we can tolerate f = floor((n-1)/3) faulty nodes.
        # E.g., 4 nodes → f=1: one Byzantine node cannot forge a 2/3 quorum.
        n = self.get_total_nodes()
        return (n - 1) // 3

    def get_quorum_size(self) -> int:
        """Get quorum size for consensus."""
        # Quorum = 2f + 1 = ceil(2n/3). Using integer arithmetic (2n+2)//3 avoids
        # floating point and always rounds up, ensuring we exceed the 2/3 threshold.
        n = self.get_total_nodes()
        return (2 * n + 2) // 3

    def is_leader(self, view: int = None) -> bool:
        """Check if this node is the leader for current view."""
        if view is None:
            view = self.current_view

        # Deterministic round-robin: sorting by node_id ensures all nodes agree on
        # the leader ordering without a separate election or coordinator.
        nodes = sorted([self.node_id] + list(self.peers.keys()))
        leader_idx = view % len(nodes)
        return nodes[leader_idx] == self.node_id

    def get_leader(self, view: int = None) -> str:
        """Get the leader node ID for a view."""
        if view is None:
            view = self.current_view

        nodes = sorted([self.node_id] + list(self.peers.keys()))
        leader_idx = view % len(nodes)
        return nodes[leader_idx]

    def _sign_message(self, data: str) -> str:
        """Sign a message with HMAC."""
        return hmac.new(
            self.secret_key.encode(),
            data.encode(),
            hashlib.sha256
        ).hexdigest()

    def _verify_signature(self, node_id: str, data: str, signature: str) -> bool:
        """Verify message signature (simplified - all nodes share key in testnet)."""
        # In production, each node would have its own keypair (ed25519 or similar).
        # Shared HMAC is acceptable in a trusted-operator testnet but means one
        # compromised node can forge messages from any peer.
        # hmac.compare_digest prevents timing side-channel leaks.
        expected = hmac.new(
            self.secret_key.encode(),
            data.encode(),
            hashlib.sha256
        ).hexdigest()
        return hmac.compare_digest(signature, expected)

    # ========================================================================
    # PHASE 1: PRE-PREPARE (Leader proposes)
    # ========================================================================

    def propose_epoch_settlement(self, epoch: int, miners: List[Dict],
                                 distribution: Dict[str, float]) -> Optional[ConsensusMessage]:
        """
        Leader proposes epoch settlement (PRE-PREPARE phase).
        Only the leader for current view can call this.

        Returns the broadcast PRE-PREPARE message, or None if this node is not
        the leader or the epoch is already committed.
        """
        with self.lock:
            if not self.is_leader():
                logging.warning(f"Node {self.node_id} is not leader for view {self.current_view}")
                return None

            if epoch in self.committed_epochs:
                logging.info(f"Epoch {epoch} already committed")
                return None

            # Create proposal
            proposal = EpochProposal(
                epoch=epoch,
                miners=miners,
                total_reward=1.5,  # RTC per epoch
                distribution=distribution,
                proposer=self.node_id,
                merkle_root=self._compute_merkle_root(miners)
            )

            digest = proposal.compute_digest()
            timestamp = int(time.time())

            # Sign the message
            sign_data = f"{MessageType.PRE_PREPARE.value}:{self.current_view}:{epoch}:{digest}:{timestamp}"
            signature = self._sign_message(sign_data)

            # Create PRE-PREPARE message
            msg = ConsensusMessage(
                msg_type=MessageType.PRE_PREPARE.value,
                view=self.current_view,
                epoch=epoch,
                digest=digest,
                node_id=self.node_id,
                signature=signature,
                timestamp=timestamp,
                proposal=asdict(proposal)
            )

            # Log locally
            self.pre_prepare_log[epoch] = msg
            self.phase = ConsensusPhase.PRE_PREPARE
            self._save_message_to_db(msg)

            # Start view change timer
            self._start_view_change_timer()

            # Broadcast to peers
            self._broadcast_message(msg)

            logging.info(f"[PRE-PREPARE] Leader proposed epoch {epoch} settlement")

            # Leader also prepares
            self._handle_pre_prepare(msg)

            return msg

    def _compute_merkle_root(self, miners: List[Dict]) -> str:
        """Compute merkle root of miner attestations."""
        if not miners:
            return hashlib.sha256(b"empty").hexdigest()

        # Simple merkle: hash all miner data
        hashes = [
            hashlib.sha256(json.dumps(m, sort_keys=True).encode()).hexdigest()
            for m in miners
        ]

        while len(hashes) > 1:
            # Duplicate the last leaf when the count is odd so we always pair evenly.
            # This is the standard Bitcoin-style merkle padding strategy.
            if len(hashes) % 2 == 1:
                hashes.append(hashes[-1])
            new_hashes = []
            for i in range(0, len(hashes), 2):
                combined = hashes[i] + hashes[i + 1]
                new_hashes.append(hashlib.sha256(combined.encode()).hexdigest())
            hashes = new_hashes

        return hashes[0]

    # ========================================================================
    # PHASE 2: PREPARE (Nodes validate and prepare)
    # ========================================================================

    def _handle_pre_prepare(self, msg: ConsensusMessage) -> Optional[ConsensusMessage]:
        """Handle received PRE-PREPARE message."""
        with self.lock:
            epoch = msg.epoch

            # Validate message
            if msg.view != self.current_view:
                logging.warning(f"PRE-PREPARE for wrong view: {msg.view} != {self.current_view}")
                return None

            if epoch in self.committed_epochs:
                logging.info(f"Epoch {epoch} already committed")
                return None

            # Verify it's from the leader
            if msg.node_id != self.get_leader(msg.view):
                logging.warning(f"PRE-PREPARE not from leader: {msg.node_id}")
                return None

            # FIX: verify the leader's signature. PREPARE and COMMIT handlers
            # already do this; previously a forged PRE-PREPARE claiming the
            # leader's node_id was accepted unauthenticated.
            sign_data = f"{MessageType.PRE_PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}"
            if not self._verify_signature(msg.node_id, sign_data, msg.signature):
                logging.warning(f"Invalid PRE-PREPARE signature from {msg.node_id}")
                return None

            # FIX: the digest we PREPARE on must actually bind the attached
            # proposal, otherwise nodes could reach agreement on a digest that
            # does not match the payload they later apply.
            if msg.compute_digest() != msg.digest:
                logging.warning(f"PRE-PREPARE digest does not match proposal for epoch {epoch}")
                return None

            # Validate proposal (hardware attestation checks)
            if not self._validate_proposal(msg.proposal):
                logging.warning(f"Invalid proposal for epoch {epoch}")
                return None

            # Store PRE-PREPARE
            if epoch not in self.pre_prepare_log:
                self.pre_prepare_log[epoch] = msg
                self._save_message_to_db(msg)

            # Send PREPARE message
            timestamp = int(time.time())
            sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{timestamp}"
            signature = self._sign_message(sign_data)

            prepare_msg = ConsensusMessage(
                msg_type=MessageType.PREPARE.value,
                view=msg.view,
                epoch=epoch,
                digest=msg.digest,
                node_id=self.node_id,
                signature=signature,
                timestamp=timestamp
            )

            # Log prepare
            if epoch not in self.prepare_log:
                self.prepare_log[epoch] = {}
            self.prepare_log[epoch][self.node_id] = prepare_msg
            self._save_message_to_db(prepare_msg)

            self.phase = ConsensusPhase.PREPARE

            # Broadcast PREPARE
            self._broadcast_message(prepare_msg)

            logging.info(f"[PREPARE] Node {self.node_id} prepared epoch {epoch}")

            # Check if we have quorum to commit
            self._check_prepare_quorum(epoch)

            return prepare_msg

    def handle_prepare(self, msg: ConsensusMessage):
        """Handle received PREPARE message from peer."""
        with self.lock:
            epoch = msg.epoch

            # Validate
            if msg.view != self.current_view:
                return

            if epoch in self.committed_epochs:
                return

            # Verify signature
            sign_data = f"{MessageType.PREPARE.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}"
            if not self._verify_signature(msg.node_id, sign_data, msg.signature):
                logging.warning(f"Invalid PREPARE signature from {msg.node_id}")
                return

            # Store prepare
            if epoch not in self.prepare_log:
                self.prepare_log[epoch] = {}

            if msg.node_id not in self.prepare_log[epoch]:
                self.prepare_log[epoch][msg.node_id] = msg
                self._save_message_to_db(msg)
                logging.info(f"[PREPARE] Received from {msg.node_id} for epoch {epoch}")

            # Check quorum
            self._check_prepare_quorum(epoch)

    def _check_prepare_quorum(self, epoch: int):
        """Check if we have quorum of PREPARE messages."""
        if epoch not in self.prepare_log:
            return

        prepare_count = len(self.prepare_log[epoch])
        quorum = self.get_quorum_size()

        logging.info(f"[PREPARE] Epoch {epoch}: {prepare_count}/{quorum} prepares")

        # Phase guard prevents sending duplicate COMMITs if more PREPAREs arrive
        # after we already advanced — only transition once per epoch.
        if prepare_count >= quorum and self.phase == ConsensusPhase.PREPARE:
            # Transition to COMMIT phase
            self._send_commit(epoch)

    # ========================================================================
    # PHASE 3: COMMIT (Finalize consensus)
    # ========================================================================

    def _send_commit(self, epoch: int):
        """Send COMMIT message after receiving quorum of PREPAREs."""
        with self.lock:
            if epoch not in self.pre_prepare_log:
                return

            pre_prepare = self.pre_prepare_log[epoch]
            timestamp = int(time.time())

            sign_data = f"{MessageType.COMMIT.value}:{pre_prepare.view}:{epoch}:{pre_prepare.digest}:{timestamp}"
            signature = self._sign_message(sign_data)

            commit_msg = ConsensusMessage(
                msg_type=MessageType.COMMIT.value,
                view=pre_prepare.view,
                epoch=epoch,
                digest=pre_prepare.digest,
                node_id=self.node_id,
                signature=signature,
                timestamp=timestamp
            )

            # Log commit
            if epoch not in self.commit_log:
                self.commit_log[epoch] = {}
            self.commit_log[epoch][self.node_id] = commit_msg
            self._save_message_to_db(commit_msg)

            self.phase = ConsensusPhase.COMMIT

            # Broadcast COMMIT
            self._broadcast_message(commit_msg)

            logging.info(f"[COMMIT] Node {self.node_id} committed epoch {epoch}")

            # Check commit quorum
            self._check_commit_quorum(epoch)

    def handle_commit(self, msg: ConsensusMessage):
        """Handle received COMMIT message."""
        with self.lock:
            epoch = msg.epoch

            if epoch in self.committed_epochs:
                return

            # Verify signature
            sign_data = f"{MessageType.COMMIT.value}:{msg.view}:{epoch}:{msg.digest}:{msg.timestamp}"
            if not self._verify_signature(msg.node_id, sign_data, msg.signature):
                logging.warning(f"Invalid COMMIT signature from {msg.node_id}")
                return

            # Store commit
            if epoch not in self.commit_log:
                self.commit_log[epoch] = {}

            if msg.node_id not in self.commit_log[epoch]:
                self.commit_log[epoch][msg.node_id] = msg
                self._save_message_to_db(msg)
                logging.info(f"[COMMIT] Received from {msg.node_id} for epoch {epoch}")

            # Check quorum
            self._check_commit_quorum(epoch)

    def _check_commit_quorum(self, epoch: int):
        """Check if we have quorum of COMMIT messages."""
        if epoch not in self.commit_log:
            return

        commit_count = len(self.commit_log[epoch])
        quorum = self.get_quorum_size()

        logging.info(f"[COMMIT] Epoch {epoch}: {commit_count}/{quorum} commits")

        if commit_count >= quorum and epoch not in self.committed_epochs:
            # CONSENSUS REACHED!
            self._finalize_epoch(epoch)

    def _finalize_epoch(self, epoch: int):
        """Finalize epoch after consensus reached."""
        with self.lock:
            if epoch in self.committed_epochs:
                return

            self.committed_epochs.add(epoch)
            self.phase = ConsensusPhase.COMMITTED

            # Cancel view change timer
            self._cancel_view_change_timer()

            # Get the proposal
            pre_prepare = self.pre_prepare_log.get(epoch)
            if not pre_prepare or not pre_prepare.proposal:
                logging.error(f"No proposal found for committed epoch {epoch}")
                return

            # Save to committed epochs table
            with sqlite3.connect(self.db_path) as conn:
                conn.execute("""
                    INSERT OR REPLACE INTO bft_committed_epochs
                    (epoch, view, digest, committed_at, proposal_json)
                    VALUES (?, ?, ?, ?, ?)
                """, (epoch, self.current_view, pre_prepare.digest,
                      int(time.time()), json.dumps(pre_prepare.proposal)))
                conn.commit()

            logging.info(f"CONSENSUS REACHED for epoch {epoch}")
            logging.info(f"  Digest: {pre_prepare.digest[:16]}...")
            logging.info(f"  Proposer: {pre_prepare.proposal.get('proposer')}")

            # Apply the settlement (distribute rewards)
            self._apply_settlement(pre_prepare.proposal)

    def _apply_settlement(self, proposal: Dict):
        """Apply the consensus settlement to database.

        Rewards are stored as integer micro-RTC (1 RTC = 1,000,000 uRTC) to
        avoid floating-point drift accumulating across many ledger entries.

        NOTE(review): assumes the `balances` and `ledger` tables already exist
        in the node database — they are not created by _init_db; confirm.
        """
        epoch = proposal.get('epoch')
        distribution = proposal.get('distribution', {})

        with sqlite3.connect(self.db_path) as conn:
            for miner_id, reward in distribution.items():
                # FIX: the explanatory comments were previously embedded INSIDE
                # this SQL string; SQLite does not support '#' comments, so
                # every settlement raised "unrecognized token" and no rewards
                # were ever credited. Comments now live in Python.
                conn.execute("""
                    INSERT INTO balances (miner_id, amount_i64)
                    VALUES (?, ?)
                    ON CONFLICT(miner_id) DO UPDATE SET
                        amount_i64 = amount_i64 + excluded.amount_i64
                """, (miner_id, int(reward * 1_000_000)))

                # Log in ledger
                conn.execute("""
                    INSERT INTO ledger (miner_id, delta_i64, tx_type, memo, ts)
                    VALUES (?, ?, 'reward', ?, ?)
                """, (miner_id, int(reward * 1_000_000), f"epoch_{epoch}_bft", int(time.time())))

            conn.commit()

        logging.info(f"Applied settlement for epoch {epoch}: {len(distribution)} miners rewarded")

    # ========================================================================
    # VIEW CHANGE (Leader failure handling)
    # ========================================================================

    def _start_view_change_timer(self):
        """Start timer for view change if consensus not reached."""
        self._cancel_view_change_timer()

        self.view_change_timer = threading.Timer(VIEW_CHANGE_TIMEOUT, self._trigger_view_change)
        self.view_change_timer.daemon = True
        self.view_change_timer.start()

    def _cancel_view_change_timer(self):
        """Cancel view change timer."""
        if self.view_change_timer:
            self.view_change_timer.cancel()
            self.view_change_timer = None

    def _trigger_view_change(self):
        """Trigger view change due to timeout."""
        with self.lock:
            logging.warning(f"[VIEW-CHANGE] Timeout! Requesting view {self.current_view + 1}")
            self.phase = ConsensusPhase.VIEW_CHANGE

            new_view = self.current_view + 1
            timestamp = int(time.time())

            sign_data = f"{MessageType.VIEW_CHANGE.value}:{new_view}:{self.current_epoch}:{timestamp}"
            signature = self._sign_message(sign_data)

            vc_msg = ViewChangeMessage(
                view=new_view,
                epoch=self.current_epoch,
                node_id=self.node_id,
                prepared_cert=None,  # Could include prepared certificate
                signature=signature
            )

            # Log view change
            if new_view not in self.view_change_log:
                self.view_change_log[new_view] = {}
            self.view_change_log[new_view][self.node_id] = vc_msg

            # Broadcast view change
            self._broadcast_view_change(vc_msg)

            # Check if we have quorum for view change
            self._check_view_change_quorum(new_view)

    def handle_view_change(self, msg_data: Dict):
        """Handle received VIEW-CHANGE message.

        NOTE(review): unlike PREPARE/COMMIT, the signature here is NOT
        verifiable because the signed timestamp is not carried on the wire
        (see ViewChangeMessage); an unauthenticated peer could vote for a
        view change. Confirm whether the protocol should add the timestamp.
        """
        with self.lock:
            new_view = msg_data.get('view')
            node_id = msg_data.get('node_id')

            if new_view not in self.view_change_log:
                self.view_change_log[new_view] = {}

            if node_id not in self.view_change_log[new_view]:
                self.view_change_log[new_view][node_id] = ViewChangeMessage(**msg_data)
                logging.info(f"[VIEW-CHANGE] Received from {node_id} for view {new_view}")

            self._check_view_change_quorum(new_view)

    def _check_view_change_quorum(self, new_view: int):
        """Check if we have quorum for view change."""
        if new_view not in self.view_change_log:
            return

        vc_count = len(self.view_change_log[new_view])
        quorum = self.get_quorum_size()

        logging.info(f"[VIEW-CHANGE] View {new_view}: {vc_count}/{quorum} votes")

        if vc_count >= quorum:
            self._perform_view_change(new_view)

    def _perform_view_change(self, new_view: int):
        """Perform view change."""
        with self.lock:
            if new_view <= self.current_view:
                return

            self.current_view = new_view
            self.phase = ConsensusPhase.IDLE

            logging.info(f"[NEW-VIEW] Changed to view {new_view}, leader: {self.get_leader()}")

            # If we're the new leader, propose
            if self.is_leader():
                logging.info(f"[NEW-VIEW] We are the new leader!")
                # New leader should re-propose pending epochs

    # ========================================================================
    # VALIDATION
    # ========================================================================

    def _validate_proposal(self, proposal: Dict) -> bool:
        """Validate an epoch settlement proposal."""
        if not proposal:
            return False

        epoch = proposal.get('epoch')
        miners = proposal.get('miners', [])
        distribution = proposal.get('distribution', {})

        # Check epoch is valid
        if epoch is None or epoch < 0:
            return False

        # Use absolute tolerance rather than ==, since floating-point arithmetic
        # on reward fractions can produce values like 1.4999999999 or 1.5000000001.
        total = sum(distribution.values())
        if abs(total - 1.5) > 0.001:
            logging.warning(f"Invalid total reward: {total} != 1.5")
            return False

        # Check all miners in distribution are in miner list
        miner_ids = {m.get('miner_id') for m in miners}
        for miner_id in distribution:
            if miner_id not in miner_ids:
                logging.warning(f"Miner {miner_id} in distribution but not in miners list")
                return False

        # Verify merkle_root matches the submitted miners list.
        # Without this check a Byzantine leader can recycle a valid merkle_root
        # from a previous epoch while submitting a different (falsified) miners
        # list, and honest nodes would still send PREPARE for the forged proposal.
        expected_merkle = self._compute_merkle_root(miners)
        if proposal.get('merkle_root') != expected_merkle:
            logging.warning(
                f"Proposal merkle_root mismatch for epoch {epoch}: "
                f"got {proposal.get('merkle_root', '')[:16]}... "
                f"expected {expected_merkle[:16]}..."
            )
            return False

        return True

    # ========================================================================
    # NETWORK
    # ========================================================================

    def _broadcast_message(self, msg: ConsensusMessage):
        """Broadcast message to all peers (best-effort, errors logged)."""
        import requests  # deferred: only the network path needs it

        for node_id, url in self.peers.items():
            try:
                endpoint = f"{url}/bft/message"
                response = requests.post(
                    endpoint,
                    json=msg.to_dict(),
                    timeout=5,
                    headers={'X-Node-ID': self.node_id}
                )
                if response.ok:
                    logging.debug(f"Broadcast {msg.msg_type} to {node_id}")
            except Exception as e:
                logging.error(f"Failed to broadcast to {node_id}: {e}")

    def _broadcast_view_change(self, msg: ViewChangeMessage):
        """Broadcast view change message (best-effort, errors logged)."""
        import requests  # deferred: only the network path needs it

        msg_data = asdict(msg)
        for node_id, url in self.peers.items():
            try:
                endpoint = f"{url}/bft/view_change"
                response = requests.post(endpoint, json=msg_data, timeout=5)
                if response.ok:
                    logging.debug(f"Broadcast VIEW-CHANGE to {node_id}")
            except Exception as e:
                logging.error(f"Failed to broadcast VIEW-CHANGE to {node_id}: {e}")

    def _save_message_to_db(self, msg: ConsensusMessage):
        """Save consensus message to database (best-effort, errors logged)."""
        try:
            with sqlite3.connect(self.db_path) as conn:
                conn.execute("""
                    INSERT OR REPLACE INTO bft_consensus_log
                    (epoch, view, msg_type, node_id, digest, proposal_json, signature, timestamp)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                """, (
                    msg.epoch, msg.view, msg.msg_type, msg.node_id,
                    msg.digest, json.dumps(msg.proposal) if msg.proposal else None,
                    msg.signature, msg.timestamp
                ))
                conn.commit()
        except Exception as e:
            logging.error(f"Failed to save message: {e}")

    def receive_message(self, msg_data: Dict):
        """Handle incoming consensus message (dispatch on msg_type)."""
        msg_type = msg_data.get('msg_type')

        if msg_type == MessageType.PRE_PREPARE.value:
            msg = ConsensusMessage.from_dict(msg_data)
            self._handle_pre_prepare(msg)
        elif msg_type == MessageType.PREPARE.value:
            msg = ConsensusMessage.from_dict(msg_data)
            self.handle_prepare(msg)
        elif msg_type == MessageType.COMMIT.value:
            msg = ConsensusMessage.from_dict(msg_data)
            self.handle_commit(msg)

    # ========================================================================
    # STATUS
    # ========================================================================

    def get_status(self) -> Dict:
        """Get consensus status snapshot."""
        with self.lock:
            return {
                'node_id': self.node_id,
                'current_view': self.current_view,
                'current_epoch': self.current_epoch,
                'phase': self.phase.value,
                'leader': self.get_leader(),
                'is_leader': self.is_leader(),
                'total_nodes': self.get_total_nodes(),
                'fault_tolerance': self.get_fault_tolerance(),
                'quorum_size': self.get_quorum_size(),
                'committed_epochs': len(self.committed_epochs),
                'peers': list(self.peers.keys())
            }


# ============================================================================
# FLASK ROUTES FOR BFT
# ============================================================================

def create_bft_routes(app, bft: BFTConsensus):
    """Add BFT consensus routes to Flask app."""
    from flask import request, jsonify

    @app.route('/bft/status', methods=['GET'])
    def bft_status():
        """Get BFT consensus status."""
        return jsonify(bft.get_status())

    @app.route('/bft/message', methods=['POST'])
    def bft_receive_message():
        """Receive consensus message from peer."""
        try:
            msg_data = request.get_json()
            bft.receive_message(msg_data)
            return jsonify({'status': 'ok'})
        except Exception as e:
            logging.error(f"BFT message error: {e}")
            return jsonify({'error': str(e)}), 400

    @app.route('/bft/view_change', methods=['POST'])
    def bft_view_change():
        """Receive view change message."""
        try:
            msg_data = request.get_json()
            bft.handle_view_change(msg_data)
            return jsonify({'status': 'ok'})
        except Exception as e:
            logging.error(f"BFT view change error: {e}")
            return jsonify({'error': str(e)}), 400

    @app.route('/bft/propose', methods=['POST'])
    def bft_propose():
        """Manually trigger epoch proposal (admin)."""
        try:
            data = request.get_json()
            epoch = data.get('epoch')
            miners = data.get('miners', [])
            distribution = data.get('distribution', {})

            msg = bft.propose_epoch_settlement(epoch, miners, distribution)
            if msg:
                return jsonify({'status': 'proposed', 'digest': msg.digest})
            else:
                return jsonify({'error': 'not_leader_or_already_committed'}), 400
        except Exception as e:
            logging.error(f"BFT propose error: {e}")
            return jsonify({'error': str(e)}), 500


# ============================================================================
# MAIN (Testing)
# ============================================================================

if __name__ == "__main__":
    import sys

    print("=" * 60)
    print("RustChain BFT Consensus Module - RIP-0202")
    print("=" * 60)

    # Test with mock data
    node_id = sys.argv[1] if len(sys.argv) > 1 else "node-131"
    db_path = "/tmp/bft_test.db"
    secret_key = "rustchain_bft_testnet_key_2025"

    bft = BFTConsensus(node_id, db_path, secret_key)

    # Register peer
    bft.register_peer("node-153", "http://50.28.86.153:8099")

    print(f"\nNode: {node_id}")
    print(f"Is Leader: {bft.is_leader()}")
    print(f"Current View: {bft.current_view}")
    print(f"Total Nodes: {bft.get_total_nodes()}")
    print(f"Quorum Size: {bft.get_quorum_size()}")
    print(f"Fault Tolerance: {bft.get_fault_tolerance()}")

    if bft.is_leader():
        print("\nProposing epoch settlement...")

        # Mock miner data
        miners = [
            {'miner_id': 'g4-powerbook-115', 'device_arch': 'G4', 'weight': 2.5},
            {'miner_id': 'sophia-nas-c4130', 'device_arch': 'modern', 'weight': 1.0},
        ]

        total_weight = sum(m['weight'] for m in miners)
        distribution = {
            m['miner_id']: 1.5 * (m['weight'] / total_weight)
            for m in miners
        }

        msg = bft.propose_epoch_settlement(epoch=425, miners=miners, distribution=distribution)
        if msg:
            print(f"Proposed! Digest: {msg.digest[:32]}...")

    print("\n" + "=" * 60)
    print("Status:", json.dumps(bft.get_status(), indent=2))
Architectural Quirk Entropy - Known hardware bugs/quirks -""" - -import hashlib -import math -import time -import random -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple, Any -from enum import Enum - - -# ============================================================================= -# Constants -# ============================================================================= - -ENTROPY_SAMPLES_REQUIRED: int = 1000 -MIN_ENTROPY_BITS: int = 64 -EMULATION_COST_THRESHOLD_USD: float = 100.0 # Cheaper to buy real hardware - - -# ============================================================================= -# Hardware Profiles -# ============================================================================= - -@dataclass -class HardwareProfile: - """Known hardware profile for validation""" - name: str - cpu_family: int - year_introduced: int - expected_bus_type: str - expected_quirks: List[str] - emulation_difficulty: float # 0.0-1.0, how hard to emulate - - # Expected instruction timing ranges (instruction -> (min_cycles, max_cycles)) - instruction_timings: Dict[str, Tuple[float, float]] = field(default_factory=dict) - - -# Known hardware database -HARDWARE_PROFILES: Dict[str, HardwareProfile] = { - "486DX2": HardwareProfile( - name="Intel 486 DX2-66", - cpu_family=4, - year_introduced=1992, - expected_bus_type="ISA", - expected_quirks=["no_rdtsc", "a20_gate"], - emulation_difficulty=0.95, - instruction_timings={ - "mul": (13.0, 42.0), - "div": (40.0, 44.0), - "fadd": (8.0, 20.0), - "fmul": (16.0, 27.0), - }, - ), - "Pentium": HardwareProfile( - name="Intel Pentium 100", - cpu_family=5, - year_introduced=1994, - expected_bus_type="PCI", - expected_quirks=["fdiv_bug"], - emulation_difficulty=0.90, - instruction_timings={ - "mul": (10.0, 11.0), - "div": (17.0, 41.0), - "fadd": (3.0, 3.0), - "fmul": (3.0, 3.0), - }, - ), - "PentiumII": HardwareProfile( - name="Intel Pentium II", - cpu_family=6, - year_introduced=1997, - 
expected_bus_type="PCI", - expected_quirks=["f00f_bug"], - emulation_difficulty=0.85, - instruction_timings={ - "mul": (4.0, 5.0), - "div": (17.0, 41.0), - "fadd": (3.0, 3.0), - "fmul": (5.0, 5.0), - }, - ), - "G4": HardwareProfile( - name="PowerPC G4", - cpu_family=74, - year_introduced=1999, - expected_bus_type="PCI", - expected_quirks=["altivec", "big_endian"], - emulation_difficulty=0.85, - instruction_timings={ - "mul": (3.0, 4.0), - "div": (20.0, 35.0), - "fadd": (5.0, 5.0), - "fmul": (5.0, 5.0), - }, - ), - "G5": HardwareProfile( - name="PowerPC G5", - cpu_family=75, - year_introduced=2003, - expected_bus_type="PCI-X", - expected_quirks=["altivec", "big_endian", "970fx"], - emulation_difficulty=0.80, - instruction_timings={ - "mul": (2.0, 4.0), - "div": (15.0, 33.0), - "fadd": (4.0, 4.0), - "fmul": (4.0, 4.0), - }, - ), - "Alpha": HardwareProfile( - name="DEC Alpha 21264", - cpu_family=21, - year_introduced=1998, - expected_bus_type="PCI", - expected_quirks=["alpha_pal", "64bit_native"], - emulation_difficulty=0.95, - instruction_timings={ - "mul": (4.0, 7.0), - "div": (12.0, 16.0), - "fadd": (4.0, 4.0), - "fmul": (4.0, 4.0), - }, - ), -} - - -# ============================================================================= -# Entropy Layers -# ============================================================================= - -@dataclass -class InstructionTimingLayer: - """Layer 1: Instruction timing measurements""" - timings: Dict[str, Dict[str, float]] # instruction -> {mean, std_dev, min, max} - cache_miss_penalty: float - branch_misprediction_cost: float - - -@dataclass -class MemoryPatternLayer: - """Layer 2: Memory access patterns""" - sequential_read_rate: float - random_read_rate: float - stride_patterns: Dict[int, float] # stride size -> rate - page_crossing_penalty: float - refresh_interference_detected: bool - - -@dataclass -class BusTimingLayer: - """Layer 3: Bus timing characteristics""" - bus_type: str - io_read_ns: float - io_write_ns: float - 
timing_variance: float - interrupt_latency_us: float - - -@dataclass -class ThermalEntropyLayer: - """Layer 4: Thermal/clock characteristics""" - clock_frequency_mhz: float - clock_variance: float - frequency_changed: bool - c_states_detected: List[str] - p_states_detected: List[str] - - -@dataclass -class QuirkEntropyLayer: - """Layer 5: Architectural quirks""" - detected_quirks: List[str] - quirk_test_results: Dict[str, Dict[str, Any]] - - -@dataclass -class EntropyProof: - """Complete entropy proof from hardware""" - instruction_layer: InstructionTimingLayer - memory_layer: MemoryPatternLayer - bus_layer: BusTimingLayer - thermal_layer: ThermalEntropyLayer - quirk_layer: QuirkEntropyLayer - challenge_response: bytes - computation_time_us: int - timestamp: int - signature_hash: str - - -# ============================================================================= -# Entropy Scores -# ============================================================================= - -@dataclass -class EntropyScores: - """Verification scores from each layer""" - instruction: float = 0.0 - memory: float = 0.0 - bus: float = 0.0 - thermal: float = 0.0 - quirks: float = 0.0 - total: float = 0.0 - - -@dataclass -class VerificationResult: - """Result of entropy verification""" - valid: bool - total_score: float - scores: EntropyScores - issues: List[str] - emulation_probability: float - - -# ============================================================================= -# Deep Entropy Verifier -# ============================================================================= - -class DeepEntropyVerifier: - """ - Multi-layer entropy verification system. - - Makes emulation economically irrational by requiring perfect simulation - of vintage hardware characteristics that are: - 1. Difficult to obtain without real hardware - 2. Expensive to compute/simulate - 3. 
Unique to each hardware generation - - Cost analysis: - - GPU compute to emulate 486 at real-time: ~50-100 hours @ $0.50/hr = $25-50 - - Cost of 486 on eBay: $30-80 one-time - - ROI for buying real hardware: 1 day of mining - - Conclusion: Deep entropy makes emulation economically irrational. - """ - - def __init__(self): - self.profiles = HARDWARE_PROFILES - self.thresholds = { - "min_instruction_entropy": 0.15, - "min_memory_entropy": 0.10, - "min_bus_entropy": 0.15, - "min_thermal_entropy": 0.05, - "min_quirk_entropy": 0.20, - "total_min_entropy": 0.65, - } - - def generate_challenge(self) -> Dict[str, Any]: - """Generate a challenge for hardware to solve""" - nonce = hashlib.sha256(str(time.time()).encode()).digest() - # Multiply the 4-op template by 25 to produce 100 total operations. - # The randomised values ensure each challenge is unique, preventing - # a cached replay attack where an attacker pre-records a real machine's response. - operations = [ - {"op": "mul", "value": random.randint(1, 1000000)}, - {"op": "div", "value": random.randint(1, 1000)}, - {"op": "fadd", "value": random.uniform(0, 1000)}, - {"op": "memory", "stride": random.choice([1, 4, 16, 64, 256])}, - ] * 25 # 100 operations - - return { - "nonce": nonce.hex(), - "operations": operations, - "expected_time_range_us": (1000, 100000), # 1ms to 100ms - "timestamp": int(time.time()), - "expires_at": int(time.time()) + 300, # 5 minute expiry - } - - def verify(self, proof: EntropyProof, claimed_hardware: str) -> VerificationResult: - """ - Verify an entropy proof against claimed hardware. 
- - Args: - proof: Complete entropy proof from hardware - claimed_hardware: Hardware profile key (e.g., "486DX2", "G4") - - Returns: - VerificationResult with scores and issues - """ - scores = EntropyScores() - issues = [] - - # Get expected profile - profile = self.profiles.get(claimed_hardware) - if not profile: - return VerificationResult( - valid=False, - total_score=0.0, - scores=scores, - issues=[f"Unknown hardware profile: {claimed_hardware}"], - emulation_probability=1.0, - ) - - # Layer 1: Verify instruction timing - scores.instruction = self._verify_instruction_layer( - proof.instruction_layer, profile - ) - if scores.instruction < self.thresholds["min_instruction_entropy"]: - issues.append( - f"Instruction timing entropy too low: {scores.instruction:.2f}" - ) - - # Layer 2: Verify memory patterns - scores.memory = self._verify_memory_layer(proof.memory_layer, profile) - if scores.memory < self.thresholds["min_memory_entropy"]: - issues.append(f"Memory pattern entropy too low: {scores.memory:.2f}") - - # Layer 3: Verify bus timing - scores.bus = self._verify_bus_layer(proof.bus_layer, profile) - if scores.bus < self.thresholds["min_bus_entropy"]: - issues.append(f"Bus timing entropy too low: {scores.bus:.2f}") - - # Layer 4: Verify thermal characteristics - scores.thermal = self._verify_thermal_layer(proof.thermal_layer, profile) - if scores.thermal < self.thresholds["min_thermal_entropy"]: - issues.append(f"Thermal entropy suspicious: {scores.thermal:.2f}") - - # Layer 5: Verify architectural quirks - scores.quirks = self._verify_quirk_layer(proof.quirk_layer, profile) - if scores.quirks < self.thresholds["min_quirk_entropy"]: - issues.append(f"Expected quirks not detected: {scores.quirks:.2f}") - - # Instruction timing carries the most weight (0.25) because it is the - # hardest to spoof consistently across all four measured operations. - # Thermal gets the least (0.15) since it can legitimately vary with room temp. 
- scores.total = ( - scores.instruction * 0.25 + - scores.memory * 0.20 + - scores.bus * 0.20 + - scores.thermal * 0.15 + - scores.quirks * 0.20 - ) - - # Scale emulation probability by hardware-specific difficulty: an Alpha - # (0.95) with the same total_score as a G5 (0.80) is harder to emulate, - # so its inferred emulation probability is lower. - emulation_prob = max(0.0, 1.0 - (scores.total * profile.emulation_difficulty)) - - valid = ( - scores.total >= self.thresholds["total_min_entropy"] and - len(issues) == 0 - ) - - return VerificationResult( - valid=valid, - total_score=scores.total, - scores=scores, - issues=issues, - emulation_probability=emulation_prob, - ) - - def _verify_instruction_layer( - self, layer: InstructionTimingLayer, profile: HardwareProfile - ) -> float: - """Verify instruction timing matches expected profile""" - score = 0.0 - checks = 0 - - for instruction, expected_range in profile.instruction_timings.items(): - if instruction in layer.timings: - checks += 1 - measured = layer.timings[instruction] - min_expected, max_expected = expected_range - - # Check if mean is within expected range - if min_expected <= measured.get("mean", 0) <= max_expected: - score += 0.5 - - # Variance check: real vintage CPUs have natural thermal jitter. - # An emulator tends to be either too uniform (std_dev ≈ 0) or - # unrealistically noisy. The 0.5× mean cap rejects the latter. 
- std_dev = measured.get("std_dev", 0) - mean = measured.get("mean", 1) - if 0 < std_dev < mean * 0.5: - score += 0.5 - - return score / checks if checks > 0 else 0.0 - - def _verify_memory_layer( - self, layer: MemoryPatternLayer, profile: HardwareProfile - ) -> float: - """Verify memory access patterns""" - score = 0.0 - - # Vintage hardware should show significant stride-dependent timing - if layer.stride_patterns: - stride_1 = layer.stride_patterns.get(1, 1) - stride_64 = layer.stride_patterns.get(64, 1) - if stride_64 / stride_1 > 1.5: - score += 0.3 # Good cache behavior signature - - # Page crossing penalty should be detectable - if layer.page_crossing_penalty > 10.0: - score += 0.3 - - # DRAM refresh interference is the strongest single signal here: - # real DRAM periodically stalls reads for a row refresh cycle (~7µs), - # which virtualised memory and SRAM-backed emulators never exhibit. - if layer.refresh_interference_detected: - score += 0.4 - - return score - - def _verify_bus_layer( - self, layer: BusTimingLayer, profile: HardwareProfile - ) -> float: - """Verify bus timing characteristics""" - score = 0.0 - - # Check bus type matches - if layer.bus_type == profile.expected_bus_type: - score += 0.5 - - # Verify I/O timing is in expected range for bus type - expected_ranges = { - "ISA": (1000, 2500), # Very slow - "EISA": (500, 1500), - "VLB": (100, 500), - "PCI": (50, 200), - "PCI-X": (30, 150), - "AGP": (30, 150), - "PCIe": (5, 50), # Very fast - } - - if layer.bus_type in expected_ranges: - min_io, max_io = expected_ranges[layer.bus_type] - if min_io <= layer.io_read_ns <= max_io: - score += 0.3 - - # Vintage hardware has slower interrupts - if layer.interrupt_latency_us > 1.0: - score += 0.2 - - return score - - def _verify_thermal_layer( - self, layer: ThermalEntropyLayer, profile: HardwareProfile - ) -> float: - """Verify thermal/clock characteristics""" - score = 0.0 - - # Vintage hardware predates DVFS (Dynamic Voltage and Frequency Scaling), - 
# C-states (CPU idle power states), and P-states (performance states). - # Detecting any of these is a strong sign the "hardware" is a modern host. - if not layer.frequency_changed: - score += 0.4 - - if not layer.c_states_detected: - score += 0.3 - - if not layer.p_states_detected: - score += 0.3 - - return score - - def _verify_quirk_layer( - self, layer: QuirkEntropyLayer, profile: HardwareProfile - ) -> float: - """Verify architectural quirks are present""" - if not profile.expected_quirks: - return 1.0 - - detected = 0 - for expected_quirk in profile.expected_quirks: - if expected_quirk in layer.detected_quirks: - detected += 1 - elif expected_quirk in layer.quirk_test_results: - result = layer.quirk_test_results[expected_quirk] - if result.get("detected") and result.get("confidence", 0) > 0.8: - detected += 1 - - return detected / len(profile.expected_quirks) - - -# ============================================================================= -# Economic Analysis -# ============================================================================= - -def emulation_cost_analysis(hardware_type: str) -> Dict[str, Any]: - """ - Analyze the economic cost of emulating vs. buying hardware. - - This proves why deep entropy makes emulation irrational. - """ - profile = HARDWARE_PROFILES.get(hardware_type) - if not profile: - return {"error": f"Unknown hardware: {hardware_type}"} - - # Rough GPU-hours estimate: harder-to-emulate hardware (emulation_difficulty → 1.0) - # requires more compute to faithfully replicate all timing layers at real-time speed. 
- gpu_hours_to_emulate = 50 + (profile.emulation_difficulty * 100) - gpu_cost_per_hour = 0.50 - emulation_cost = gpu_hours_to_emulate * gpu_cost_per_hour - - # Real hardware costs (approximate eBay prices) - hardware_prices = { - "486DX2": 50, - "Pentium": 40, - "PentiumII": 30, - "G4": 80, - "G5": 150, - "Alpha": 200, - } - real_cost = hardware_prices.get(hardware_type, 100) - - # Power costs (per year at $0.10/kWh) - power_watts = {"486DX2": 15, "Pentium": 25, "G4": 50, "G5": 100} - watts = power_watts.get(hardware_type, 50) - yearly_power_cost = watts * 24 * 365 * 0.10 / 1000 - - return { - "hardware": profile.name, - "emulation_difficulty": profile.emulation_difficulty, - "estimated_gpu_hours": gpu_hours_to_emulate, - "emulation_cost_usd": emulation_cost, - "real_hardware_cost_usd": real_cost, - "yearly_power_cost_usd": yearly_power_cost, - "breakeven_days": (emulation_cost - real_cost) / (yearly_power_cost / 365), - "recommendation": "BUY REAL HARDWARE" if emulation_cost > real_cost else "EMULATE", - "economic_conclusion": ( - f"Buying a real {profile.name} for ${real_cost} is " - f"{'cheaper' if real_cost < emulation_cost else 'more expensive'} " - f"than emulating (${emulation_cost:.2f})" - ), - } - - -if __name__ == "__main__": - print("=" * 70) - print("RUSTCHAIN DEEP ENTROPY - ECONOMIC ANALYSIS") - print("=" * 70) - print() - print("Why emulation is economically irrational:") - print() - - for hw_type in ["486DX2", "G4", "Alpha"]: - analysis = emulation_cost_analysis(hw_type) - print(f"📟 {analysis['hardware']}") - print(f" Emulation difficulty: {analysis['emulation_difficulty']:.0%}") - print(f" GPU hours to emulate: {analysis['estimated_gpu_hours']:.0f}") - print(f" Emulation cost: ${analysis['emulation_cost_usd']:.2f}") - print(f" Real hardware cost: ${analysis['real_hardware_cost_usd']:.2f}") - print(f" Yearly power cost: ${analysis['yearly_power_cost_usd']:.2f}") - print(f" 💡 {analysis['economic_conclusion']}") - print() - - print("=" * 70) - 
print("CONCLUSION: Buy a $50 486, don't waste $50+ trying to emulate it!") - print("=" * 70) +""" +RustChain Deep Entropy Hardware Verification (RIP-0003) +======================================================= + +Multi-layer entropy verification that makes emulation economically irrational. + +Philosophy: It should be cheaper to buy a $50 486 than to emulate one. + +Layers: +1. Instruction Timing Entropy - CPU-specific timing patterns +2. Memory Access Pattern Entropy - Cache/DRAM behavior +3. Bus Timing Entropy - ISA/PCI/PCIe timing signatures +4. Thermal Entropy - Clock stability, DVFS detection +5. Architectural Quirk Entropy - Known hardware bugs/quirks +""" + +import hashlib +import math +import time +import random +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple, Any +from enum import Enum + + +# ============================================================================= +# Constants +# ============================================================================= + +ENTROPY_SAMPLES_REQUIRED: int = 1000 +MIN_ENTROPY_BITS: int = 64 +EMULATION_COST_THRESHOLD_USD: float = 100.0 # Cheaper to buy real hardware + + +# ============================================================================= +# Hardware Profiles +# ============================================================================= + +@dataclass +class HardwareProfile: + """Known hardware profile for validation""" + name: str + cpu_family: int + year_introduced: int + expected_bus_type: str + expected_quirks: List[str] + emulation_difficulty: float # 0.0-1.0, how hard to emulate + + # Expected instruction timing ranges (instruction -> (min_cycles, max_cycles)) + instruction_timings: Dict[str, Tuple[float, float]] = field(default_factory=dict) + + +# Known hardware database +HARDWARE_PROFILES: Dict[str, HardwareProfile] = { + "486DX2": HardwareProfile( + name="Intel 486 DX2-66", + cpu_family=4, + year_introduced=1992, + expected_bus_type="ISA", + 
expected_quirks=["no_rdtsc", "a20_gate"], + emulation_difficulty=0.95, + instruction_timings={ + "mul": (13.0, 42.0), + "div": (40.0, 44.0), + "fadd": (8.0, 20.0), + "fmul": (16.0, 27.0), + }, + ), + "Pentium": HardwareProfile( + name="Intel Pentium 100", + cpu_family=5, + year_introduced=1994, + expected_bus_type="PCI", + expected_quirks=["fdiv_bug"], + emulation_difficulty=0.90, + instruction_timings={ + "mul": (10.0, 11.0), + "div": (17.0, 41.0), + "fadd": (3.0, 3.0), + "fmul": (3.0, 3.0), + }, + ), + "PentiumII": HardwareProfile( + name="Intel Pentium II", + cpu_family=6, + year_introduced=1997, + expected_bus_type="PCI", + expected_quirks=["f00f_bug"], + emulation_difficulty=0.85, + instruction_timings={ + "mul": (4.0, 5.0), + "div": (17.0, 41.0), + "fadd": (3.0, 3.0), + "fmul": (5.0, 5.0), + }, + ), + "G4": HardwareProfile( + name="PowerPC G4", + cpu_family=74, + year_introduced=1999, + expected_bus_type="PCI", + expected_quirks=["altivec", "big_endian"], + emulation_difficulty=0.85, + instruction_timings={ + "mul": (3.0, 4.0), + "div": (20.0, 35.0), + "fadd": (5.0, 5.0), + "fmul": (5.0, 5.0), + }, + ), + "G5": HardwareProfile( + name="PowerPC G5", + cpu_family=75, + year_introduced=2003, + expected_bus_type="PCI-X", + expected_quirks=["altivec", "big_endian", "970fx"], + emulation_difficulty=0.80, + instruction_timings={ + "mul": (2.0, 4.0), + "div": (15.0, 33.0), + "fadd": (4.0, 4.0), + "fmul": (4.0, 4.0), + }, + ), + "Alpha": HardwareProfile( + name="DEC Alpha 21264", + cpu_family=21, + year_introduced=1998, + expected_bus_type="PCI", + expected_quirks=["alpha_pal", "64bit_native"], + emulation_difficulty=0.95, + instruction_timings={ + "mul": (4.0, 7.0), + "div": (12.0, 16.0), + "fadd": (4.0, 4.0), + "fmul": (4.0, 4.0), + }, + ), +} + + +# ============================================================================= +# Entropy Layers +# ============================================================================= + +@dataclass +class 
InstructionTimingLayer: + """Layer 1: Instruction timing measurements""" + timings: Dict[str, Dict[str, float]] # instruction -> {mean, std_dev, min, max} + cache_miss_penalty: float + branch_misprediction_cost: float + + +@dataclass +class MemoryPatternLayer: + """Layer 2: Memory access patterns""" + sequential_read_rate: float + random_read_rate: float + stride_patterns: Dict[int, float] # stride size -> rate + page_crossing_penalty: float + refresh_interference_detected: bool + + +@dataclass +class BusTimingLayer: + """Layer 3: Bus timing characteristics""" + bus_type: str + io_read_ns: float + io_write_ns: float + timing_variance: float + interrupt_latency_us: float + + +@dataclass +class ThermalEntropyLayer: + """Layer 4: Thermal/clock characteristics""" + clock_frequency_mhz: float + clock_variance: float + frequency_changed: bool + c_states_detected: List[str] + p_states_detected: List[str] + + +@dataclass +class QuirkEntropyLayer: + """Layer 5: Architectural quirks""" + detected_quirks: List[str] + quirk_test_results: Dict[str, Dict[str, Any]] + + +@dataclass +class EntropyProof: + """Complete entropy proof from hardware""" + instruction_layer: InstructionTimingLayer + memory_layer: MemoryPatternLayer + bus_layer: BusTimingLayer + thermal_layer: ThermalEntropyLayer + quirk_layer: QuirkEntropyLayer + challenge_response: bytes + computation_time_us: int + timestamp: int + signature_hash: str + + +# ============================================================================= +# Entropy Scores +# ============================================================================= + +@dataclass +class EntropyScores: + """Verification scores from each layer""" + instruction: float = 0.0 + memory: float = 0.0 + bus: float = 0.0 + thermal: float = 0.0 + quirks: float = 0.0 + total: float = 0.0 + + +@dataclass +class VerificationResult: + """Result of entropy verification""" + valid: bool + total_score: float + scores: EntropyScores + issues: List[str] + 
emulation_probability: float + + +# ============================================================================= +# Deep Entropy Verifier +# ============================================================================= + +class DeepEntropyVerifier: + """ + Multi-layer entropy verification system. + + Makes emulation economically irrational by requiring perfect simulation + of vintage hardware characteristics that are: + 1. Difficult to obtain without real hardware + 2. Expensive to compute/simulate + 3. Unique to each hardware generation + + Cost analysis: + - GPU compute to emulate 486 at real-time: ~50-100 hours @ $0.50/hr = $25-50 + - Cost of 486 on eBay: $30-80 one-time + - ROI for buying real hardware: 1 day of mining + + Conclusion: Deep entropy makes emulation economically irrational. + """ + + def __init__(self): + self.profiles = HARDWARE_PROFILES + self.thresholds = { + "min_instruction_entropy": 0.15, + "min_memory_entropy": 0.10, + "min_bus_entropy": 0.15, + "min_thermal_entropy": 0.05, + "min_quirk_entropy": 0.20, + "total_min_entropy": 0.65, + } + + def generate_challenge(self) -> Dict[str, Any]: + """Generate a challenge for hardware to solve""" + nonce = hashlib.sha256(str(time.time()).encode()).digest() + # Multiply the 4-op template by 25 to produce 100 total operations. + # The randomised values ensure each challenge is unique, preventing + # a cached replay attack where an attacker pre-records a real machine's response. 
+ operations = [ + {"op": "mul", "value": random.randint(1, 1000000)}, + {"op": "div", "value": random.randint(1, 1000)}, + {"op": "fadd", "value": random.uniform(0, 1000)}, + {"op": "memory", "stride": random.choice([1, 4, 16, 64, 256])}, + ] * 25 # 100 operations + + return { + "nonce": nonce.hex(), + "operations": operations, + "expected_time_range_us": (1000, 100000), # 1ms to 100ms + "timestamp": int(time.time()), + "expires_at": int(time.time()) + 300, # 5 minute expiry + } + + def verify(self, proof: EntropyProof, claimed_hardware: str) -> VerificationResult: + """ + Verify an entropy proof against claimed hardware. + + Args: + proof: Complete entropy proof from hardware + claimed_hardware: Hardware profile key (e.g., "486DX2", "G4") + + Returns: + VerificationResult with scores and issues + """ + scores = EntropyScores() + issues = [] + + # Get expected profile + profile = self.profiles.get(claimed_hardware) + if not profile: + return VerificationResult( + valid=False, + total_score=0.0, + scores=scores, + issues=[f"Unknown hardware profile: {claimed_hardware}"], + emulation_probability=1.0, + ) + + # Layer 1: Verify instruction timing + scores.instruction = self._verify_instruction_layer( + proof.instruction_layer, profile + ) + if scores.instruction < self.thresholds["min_instruction_entropy"]: + issues.append( + f"Instruction timing entropy too low: {scores.instruction:.2f}" + ) + + # Layer 2: Verify memory patterns + scores.memory = self._verify_memory_layer(proof.memory_layer, profile) + if scores.memory < self.thresholds["min_memory_entropy"]: + issues.append(f"Memory pattern entropy too low: {scores.memory:.2f}") + + # Layer 3: Verify bus timing + scores.bus = self._verify_bus_layer(proof.bus_layer, profile) + if scores.bus < self.thresholds["min_bus_entropy"]: + issues.append(f"Bus timing entropy too low: {scores.bus:.2f}") + + # Layer 4: Verify thermal characteristics + scores.thermal = self._verify_thermal_layer(proof.thermal_layer, profile) + if 
scores.thermal < self.thresholds["min_thermal_entropy"]: + issues.append(f"Thermal entropy suspicious: {scores.thermal:.2f}") + + # Layer 5: Verify architectural quirks + scores.quirks = self._verify_quirk_layer(proof.quirk_layer, profile) + if scores.quirks < self.thresholds["min_quirk_entropy"]: + issues.append(f"Expected quirks not detected: {scores.quirks:.2f}") + + # Instruction timing carries the most weight (0.25) because it is the + # hardest to spoof consistently across all four measured operations. + # Thermal gets the least (0.15) since it can legitimately vary with room temp. + scores.total = ( + scores.instruction * 0.25 + + scores.memory * 0.20 + + scores.bus * 0.20 + + scores.thermal * 0.15 + + scores.quirks * 0.20 + ) + + # Scale emulation probability by hardware-specific difficulty: an Alpha + # (0.95) with the same total_score as a G5 (0.80) is harder to emulate, + # so its inferred emulation probability is lower. + emulation_prob = max(0.0, 1.0 - (scores.total * profile.emulation_difficulty)) + + valid = ( + scores.total >= self.thresholds["total_min_entropy"] and + len(issues) == 0 + ) + + return VerificationResult( + valid=valid, + total_score=scores.total, + scores=scores, + issues=issues, + emulation_probability=emulation_prob, + ) + + def _verify_instruction_layer( + self, layer: InstructionTimingLayer, profile: HardwareProfile + ) -> float: + """Verify instruction timing matches expected profile""" + score = 0.0 + checks = 0 + + for instruction, expected_range in profile.instruction_timings.items(): + if instruction in layer.timings: + checks += 1 + measured = layer.timings[instruction] + min_expected, max_expected = expected_range + + # Check if mean is within expected range + if min_expected <= measured.get("mean", 0) <= max_expected: + score += 0.5 + + # Variance check: real vintage CPUs have natural thermal jitter. + # An emulator tends to be either too uniform (std_dev ≈ 0) or + # unrealistically noisy. 
The 0.5× mean cap rejects the latter. + std_dev = measured.get("std_dev", 0) + mean = measured.get("mean", 1) + if 0 < std_dev < mean * 0.5: + score += 0.5 + + return score / checks if checks > 0 else 0.0 + + def _verify_memory_layer( + self, layer: MemoryPatternLayer, profile: HardwareProfile + ) -> float: + """Verify memory access patterns""" + score = 0.0 + + # Vintage hardware should show significant stride-dependent timing + if layer.stride_patterns: + stride_1 = layer.stride_patterns.get(1, 1) + stride_64 = layer.stride_patterns.get(64, 1) + if stride_64 / stride_1 > 1.5: + score += 0.3 # Good cache behavior signature + + # Page crossing penalty should be detectable + if layer.page_crossing_penalty > 10.0: + score += 0.3 + + # DRAM refresh interference is the strongest single signal here: + # real DRAM periodically stalls reads for a row refresh cycle (~7µs), + # which virtualised memory and SRAM-backed emulators never exhibit. + if layer.refresh_interference_detected: + score += 0.4 + + return score + + def _verify_bus_layer( + self, layer: BusTimingLayer, profile: HardwareProfile + ) -> float: + """Verify bus timing characteristics""" + score = 0.0 + + # Check bus type matches + if layer.bus_type == profile.expected_bus_type: + score += 0.5 + + # Verify I/O timing is in expected range for bus type + expected_ranges = { + "ISA": (1000, 2500), # Very slow + "EISA": (500, 1500), + "VLB": (100, 500), + "PCI": (50, 200), + "PCI-X": (30, 150), + "AGP": (30, 150), + "PCIe": (5, 50), # Very fast + } + + if layer.bus_type in expected_ranges: + min_io, max_io = expected_ranges[layer.bus_type] + if min_io <= layer.io_read_ns <= max_io: + score += 0.3 + + # Vintage hardware has slower interrupts + if layer.interrupt_latency_us > 1.0: + score += 0.2 + + return score + + def _verify_thermal_layer( + self, layer: ThermalEntropyLayer, profile: HardwareProfile + ) -> float: + """Verify thermal/clock characteristics""" + score = 0.0 + + # Vintage hardware predates DVFS 
(Dynamic Voltage and Frequency Scaling), + # C-states (CPU idle power states), and P-states (performance states). + # Detecting any of these is a strong sign the "hardware" is a modern host. + if not layer.frequency_changed: + score += 0.4 + + if not layer.c_states_detected: + score += 0.3 + + if not layer.p_states_detected: + score += 0.3 + + return score + + def _verify_quirk_layer( + self, layer: QuirkEntropyLayer, profile: HardwareProfile + ) -> float: + """Verify architectural quirks are present""" + if not profile.expected_quirks: + return 1.0 + + detected = 0 + for expected_quirk in profile.expected_quirks: + if expected_quirk in layer.detected_quirks: + detected += 1 + elif expected_quirk in layer.quirk_test_results: + result = layer.quirk_test_results[expected_quirk] + if result.get("detected") and result.get("confidence", 0) > 0.8: + detected += 1 + + return detected / len(profile.expected_quirks) + + +# ============================================================================= +# Economic Analysis +# ============================================================================= + +def emulation_cost_analysis(hardware_type: str) -> Dict[str, Any]: + """ + Analyze the economic cost of emulating vs. buying hardware. + + This proves why deep entropy makes emulation irrational. + """ + profile = HARDWARE_PROFILES.get(hardware_type) + if not profile: + return {"error": f"Unknown hardware: {hardware_type}"} + + # Rough GPU-hours estimate: harder-to-emulate hardware (emulation_difficulty → 1.0) + # requires more compute to faithfully replicate all timing layers at real-time speed. 
+ gpu_hours_to_emulate = 50 + (profile.emulation_difficulty * 100) + gpu_cost_per_hour = 0.50 + emulation_cost = gpu_hours_to_emulate * gpu_cost_per_hour + + # Real hardware costs (approximate eBay prices) + hardware_prices = { + "486DX2": 50, + "Pentium": 40, + "PentiumII": 30, + "G4": 80, + "G5": 150, + "Alpha": 200, + } + real_cost = hardware_prices.get(hardware_type, 100) + + # Power costs (per year at $0.10/kWh) + power_watts = {"486DX2": 15, "Pentium": 25, "G4": 50, "G5": 100} + watts = power_watts.get(hardware_type, 50) + yearly_power_cost = watts * 24 * 365 * 0.10 / 1000 + + return { + "hardware": profile.name, + "emulation_difficulty": profile.emulation_difficulty, + "estimated_gpu_hours": gpu_hours_to_emulate, + "emulation_cost_usd": emulation_cost, + "real_hardware_cost_usd": real_cost, + "yearly_power_cost_usd": yearly_power_cost, + "breakeven_days": (emulation_cost - real_cost) / (yearly_power_cost / 365), + "recommendation": "BUY REAL HARDWARE" if emulation_cost > real_cost else "EMULATE", + "economic_conclusion": ( + f"Buying a real {profile.name} for ${real_cost} is " + f"{'cheaper' if real_cost < emulation_cost else 'more expensive'} " + f"than emulating (${emulation_cost:.2f})" + ), + } + + +if __name__ == "__main__": + print("=" * 70) + print("RUSTCHAIN DEEP ENTROPY - ECONOMIC ANALYSIS") + print("=" * 70) + print() + print("Why emulation is economically irrational:") + print() + + for hw_type in ["486DX2", "G4", "Alpha"]: + analysis = emulation_cost_analysis(hw_type) + print(f"📟 {analysis['hardware']}") + print(f" Emulation difficulty: {analysis['emulation_difficulty']:.0%}") + print(f" GPU hours to emulate: {analysis['estimated_gpu_hours']:.0f}") + print(f" Emulation cost: ${analysis['emulation_cost_usd']:.2f}") + print(f" Real hardware cost: ${analysis['real_hardware_cost_usd']:.2f}") + print(f" Yearly power cost: ${analysis['yearly_power_cost_usd']:.2f}") + print(f" 💡 {analysis['economic_conclusion']}") + print() + + print("=" * 70) + 
print("CONCLUSION: Buy a $50 486, don't waste $50+ trying to emulate it!") + print("=" * 70) diff --git a/rips/python/rustchain/fleet_immune_system.py b/rips/python/rustchain/fleet_immune_system.py index 80861c04..568d0a8d 100644 --- a/rips/python/rustchain/fleet_immune_system.py +++ b/rips/python/rustchain/fleet_immune_system.py @@ -1,1110 +1,1110 @@ -#!/usr/bin/env python3 -""" -RIP-201: Fleet Detection Immune System -======================================= - -Protects RustChain reward economics from fleet-scale attacks where a single -actor deploys many machines (real or emulated) to dominate the reward pool. - -Core Principles: - 1. Anti-homogeneity, not anti-modern — diversity IS the immune system - 2. Bucket normalization — rewards split by hardware CLASS, not per-CPU - 3. Fleet signal detection — IP clustering, timing correlation, fingerprint similarity - 4. Multiplier decay — suspected fleet members get diminishing returns - 5. Pressure feedback — overrepresented classes get flattened, rare ones get boosted - -Design Axiom: - "One of everything beats a hundred of one thing." - -Integration: - Called from calculate_epoch_rewards_time_aged() BEFORE distributing rewards. - Requires fleet_signals table populated by submit_attestation(). 
- -Author: Scott Boudreaux / Elyan Labs -Date: 2026-02-28 -""" - -import hashlib -import math -import sqlite3 -import time -from collections import defaultdict -from typing import Dict, List, Optional, Tuple - -# ═══════════════════════════════════════════════════════════ -# CONFIGURATION -# ═══════════════════════════════════════════════════════════ - -# Hardware class buckets — rewards split equally across these -HARDWARE_BUCKETS = { - "vintage_powerpc": ["g3", "g4", "g5", "powerpc", "powerpc g3", "powerpc g4", - "powerpc g5", "powerpc g3 (750)", "powerpc g4 (74xx)", - "powerpc g5 (970)", "power macintosh"], - "vintage_x86": ["pentium", "pentium4", "retro", "core2", "core2duo", - "nehalem", "sandybridge"], - "apple_silicon": ["apple_silicon", "m1", "m2", "m3"], - "modern": ["modern", "x86_64"], - "exotic": ["power8", "power9", "sparc", "mips", "riscv", "s390x"], - "arm": ["aarch64", "arm", "armv7", "armv7l"], - "retro_console": ["nes_6502", "snes_65c816", "n64_mips", "gba_arm7", - "genesis_68000", "sms_z80", "saturn_sh2", - "gameboy_z80", "gameboy_color_z80", "ps1_mips", - "6502", "65c816", "z80", "sh2"], -} - -# Reverse lookup: arch → bucket name -ARCH_TO_BUCKET = {} -for bucket, archs in HARDWARE_BUCKETS.items(): - for arch in archs: - ARCH_TO_BUCKET[arch] = bucket - -# Fleet detection thresholds -FLEET_SUBNET_THRESHOLD = 3 # 3+ miners from same /24 = signal -FLEET_TIMING_WINDOW_S = 30 # Attestations within 30s = correlated -FLEET_TIMING_THRESHOLD = 0.6 # 60%+ of attestations correlated = signal -FLEET_FINGERPRINT_THRESHOLD = 0.85 # Cosine similarity > 0.85 = signal - -# Fleet score → multiplier decay -# fleet_score 0.0 = solo miner (no decay) -# fleet_score 1.0 = definite fleet (max decay) -FLEET_DECAY_COEFF = 0.4 # Max 40% reduction at fleet_score=1.0 -FLEET_SCORE_FLOOR = 0.6 # Never decay below 60% of base multiplier - -# Bucket normalization mode -# "equal_split" = hard split: each active bucket gets equal share of pot (RECOMMENDED) -# "pressure" = soft: 
overrepresented buckets get flattened multiplier -BUCKET_MODE = "equal_split" - -# Bucket pressure parameters (used when BUCKET_MODE = "pressure") -BUCKET_IDEAL_SHARE = None # Auto-calculated as 1/num_active_buckets -BUCKET_PRESSURE_STRENGTH = 0.5 # How aggressively to flatten overrepresented buckets -BUCKET_MIN_WEIGHT = 0.3 # Minimum bucket weight (even if massively overrepresented) - -# Minimum miners to trigger fleet detection (below this, everyone is solo) -FLEET_DETECTION_MINIMUM = 4 - - -# ═══════════════════════════════════════════════════════════ -# DATABASE SCHEMA -# ═══════════════════════════════════════════════════════════ - -SCHEMA_SQL = """ --- Fleet signal tracking per attestation -CREATE TABLE IF NOT EXISTS fleet_signals ( - miner TEXT NOT NULL, - epoch INTEGER NOT NULL, - subnet_hash TEXT, -- HMAC of /24 subnet for privacy - attest_ts INTEGER NOT NULL, -- Exact attestation timestamp - clock_drift_cv REAL, -- Clock drift coefficient of variation - cache_latency_hash TEXT, -- Hash of cache timing profile - thermal_signature REAL, -- Thermal drift entropy value - simd_bias_hash TEXT, -- Hash of SIMD timing profile - PRIMARY KEY (miner, epoch) -); - --- Fleet detection results per epoch -CREATE TABLE IF NOT EXISTS fleet_scores ( - miner TEXT NOT NULL, - epoch INTEGER NOT NULL, - fleet_score REAL NOT NULL DEFAULT 0.0, -- 0.0=solo, 1.0=definite fleet - ip_signal REAL DEFAULT 0.0, - timing_signal REAL DEFAULT 0.0, - fingerprint_signal REAL DEFAULT 0.0, - cluster_id TEXT, -- Fleet cluster identifier - effective_multiplier REAL, -- After decay - PRIMARY KEY (miner, epoch) -); - --- Bucket pressure tracking per epoch -CREATE TABLE IF NOT EXISTS bucket_pressure ( - epoch INTEGER NOT NULL, - bucket TEXT NOT NULL, - miner_count INTEGER NOT NULL, - raw_weight REAL NOT NULL, - pressure_factor REAL NOT NULL, -- <1.0 = overrepresented, >1.0 = rare - adjusted_weight REAL NOT NULL, - PRIMARY KEY (epoch, bucket) -); - --- Fleet cluster registry -CREATE TABLE IF NOT 
EXISTS fleet_clusters ( - cluster_id TEXT PRIMARY KEY, - first_seen_epoch INTEGER NOT NULL, - last_seen_epoch INTEGER NOT NULL, - member_count INTEGER NOT NULL, - detection_signals TEXT, -- JSON: which signals triggered - cumulative_score REAL DEFAULT 0.0 -); -""" - - -def ensure_schema(db: sqlite3.Connection): - """Create fleet immune system tables if they don't exist.""" - db.executescript(SCHEMA_SQL) - db.commit() - - -# ═══════════════════════════════════════════════════════════ -# SIGNAL COLLECTION (called from submit_attestation) -# ═══════════════════════════════════════════════════════════ - -def record_fleet_signals_from_request( - db: sqlite3.Connection, - miner: str, - epoch: int, - ip_address: str, - attest_ts: int, - fingerprint: Optional[dict] = None -): - """ - Record fleet detection signals from an attestation submission. - - Called from submit_attestation() after validation passes. - Stores privacy-preserving hashes of network and fingerprint data. - """ - ensure_schema(db) - - # Hash the /24 subnet rather than storing the raw IP so we can group miners - # by network without logging PII. The 16-char truncation is still collision- - # resistant enough for fleet detection while reducing storage footprint. 
- if ip_address: - parts = ip_address.split('.') - if len(parts) == 4: - subnet = '.'.join(parts[:3]) - subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] - else: - subnet_hash = hashlib.sha256(ip_address.encode()).hexdigest()[:16] - else: - subnet_hash = None - - # Extract fingerprint signals - clock_drift_cv = None - cache_hash = None - thermal_sig = None - simd_hash = None - - if fingerprint and isinstance(fingerprint, dict): - checks = fingerprint.get("checks", {}) - - # Clock drift coefficient of variation - clock = checks.get("clock_drift", {}).get("data", {}) - clock_drift_cv = clock.get("cv") - - # Cache timing profile hash (privacy-preserving) - cache = checks.get("cache_timing", {}).get("data", {}) - if cache: - cache_str = str(sorted(cache.items())) - cache_hash = hashlib.sha256(cache_str.encode()).hexdigest()[:16] - - # Thermal drift entropy - thermal = checks.get("thermal_drift", {}).get("data", {}) - thermal_sig = thermal.get("entropy", thermal.get("drift_magnitude")) - - # SIMD bias profile hash - simd = checks.get("simd_identity", {}).get("data", {}) - if simd: - simd_str = str(sorted(simd.items())) - simd_hash = hashlib.sha256(simd_str.encode()).hexdigest()[:16] - - db.execute(""" - INSERT OR REPLACE INTO fleet_signals - (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) - """, (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_hash, thermal_sig, simd_hash)) - db.commit() - - -def record_fleet_signals(db_path_or_conn, miner: str, device: dict, - signals: dict, fingerprint: Optional[dict], - attest_ts: int, ip_address: str = None, - epoch: int = None): - """ - Convenience wrapper called from record_attestation_success(). - - Accepts either a DB path (str) or connection, and extracts - the IP from signals if not provided explicitly. 
- """ - import time as _time - - if isinstance(db_path_or_conn, str): - db = sqlite3.connect(db_path_or_conn) - own = True - else: - db = db_path_or_conn - own = False - - try: - # Get epoch from current time if not provided - if epoch is None: - GENESIS = 1764706927 - BLOCK_TIME = 600 - slot = (int(_time.time()) - GENESIS) // BLOCK_TIME - epoch = slot // 144 - - # Extract IP from signals or request - if not ip_address: - ip_address = signals.get("ip", signals.get("remote_addr", "")) - - record_fleet_signals_from_request(db, miner, epoch, ip_address, - attest_ts, fingerprint) - except Exception as e: - print(f"[RIP-201] Fleet signal recording error: {e}") - finally: - if own: - db.close() - - -# ═══════════════════════════════════════════════════════════ -# FLEET DETECTION ENGINE -# ═══════════════════════════════════════════════════════════ - -def _detect_ip_clustering( - signals: List[dict] -) -> Dict[str, float]: - """ - Detect miners sharing the same /24 subnet. - - Returns: {miner_id: ip_signal} where ip_signal = 0.0-1.0 - """ - scores = {} - - # Group by subnet hash - subnet_groups = defaultdict(list) - for sig in signals: - if sig["subnet_hash"]: - subnet_groups[sig["subnet_hash"]].append(sig["miner"]) - - for subnet, miners in subnet_groups.items(): - count = len(miners) - if count >= FLEET_SUBNET_THRESHOLD: - # Sublinear signal growth (count/20 + 0.15) so a small legit datacenter - # (e.g., 3 boxes) doesn't get the same penalty as a 20-machine farm. - # We take the max so a miner in multiple overlapping clusters keeps - # the highest signal rather than summing them. 
- signal = min(1.0, count / 20.0 + 0.15) - for m in miners: - scores[m] = max(scores.get(m, 0.0), signal) - - # Solo miners or small groups: 0.0 - for sig in signals: - if sig["miner"] not in scores: - scores[sig["miner"]] = 0.0 - - return scores - - -def _detect_timing_correlation( - signals: List[dict] -) -> Dict[str, float]: - """ - Detect miners whose attestation timestamps are suspiciously synchronized. - - Fleet operators often update all miners in rapid succession. - Real independent operators attest at random times throughout the day. - """ - scores = {} - if len(signals) < FLEET_DETECTION_MINIMUM: - return {s["miner"]: 0.0 for s in signals} - - timestamps = [(s["miner"], s["attest_ts"]) for s in signals] - timestamps.sort(key=lambda x: x[1]) - - # O(n²) comparison is intentional: fleet epochs typically have <100 miners, - # so quadratic cost is negligible and we avoid false negatives from binning. - for i, (miner_a, ts_a) in enumerate(timestamps): - correlated = 0 - total_others = len(timestamps) - 1 - for j, (miner_b, ts_b) in enumerate(timestamps): - if i == j: - continue - if abs(ts_a - ts_b) <= FLEET_TIMING_WINDOW_S: - correlated += 1 - - if total_others > 0: - ratio = correlated / total_others - if ratio >= FLEET_TIMING_THRESHOLD: - # High correlation → fleet signal - scores[miner_a] = min(1.0, ratio) - else: - scores[miner_a] = 0.0 - else: - scores[miner_a] = 0.0 - - return scores - - -def _detect_fingerprint_similarity( - signals: List[dict] -) -> Dict[str, float]: - """ - Detect miners with suspiciously similar hardware fingerprints. - - Identical cache timing profiles, SIMD bias, or thermal signatures - across different "machines" indicate shared hardware or VMs on same host. 
- """ - scores = {} - if len(signals) < FLEET_DETECTION_MINIMUM: - return {s["miner"]: 0.0 for s in signals} - - # Build similarity groups from hash matches - # Miners sharing 2+ fingerprint hashes are likely same hardware - for i, sig_a in enumerate(signals): - matches = 0 - match_count = 0 - - for j, sig_b in enumerate(signals): - if i == j: - continue - - shared_hashes = 0 - total_hashes = 0 - - # Compare cache timing hash - if sig_a.get("cache_latency_hash") and sig_b.get("cache_latency_hash"): - total_hashes += 1 - if sig_a["cache_latency_hash"] == sig_b["cache_latency_hash"]: - shared_hashes += 1 - - # Compare SIMD bias hash - if sig_a.get("simd_bias_hash") and sig_b.get("simd_bias_hash"): - total_hashes += 1 - if sig_a["simd_bias_hash"] == sig_b["simd_bias_hash"]: - shared_hashes += 1 - - # Compare clock drift CV (within 5% = suspiciously similar) - if sig_a.get("clock_drift_cv") and sig_b.get("clock_drift_cv"): - total_hashes += 1 - cv_a, cv_b = sig_a["clock_drift_cv"], sig_b["clock_drift_cv"] - if cv_b > 0 and abs(cv_a - cv_b) / cv_b < 0.05: - shared_hashes += 1 - - # Compare thermal signature (within 10%) - if sig_a.get("thermal_signature") and sig_b.get("thermal_signature"): - total_hashes += 1 - th_a, th_b = sig_a["thermal_signature"], sig_b["thermal_signature"] - if th_b > 0 and abs(th_a - th_b) / th_b < 0.10: - shared_hashes += 1 - - # Require 2+ matching hashes to avoid false positives from a single - # shared data-centre NTP server inflating clock_drift_cv similarity. - if total_hashes >= 2 and shared_hashes >= 2: - matches += 1 - - # Signal scales with match count: 1→0.35, 2→0.50, 5→0.95, 6+→1.0 - if matches > 0: - scores[sig_a["miner"]] = min(1.0, 0.2 + matches * 0.15) - else: - scores[sig_a["miner"]] = 0.0 - - return scores - - -def compute_fleet_scores( - db: sqlite3.Connection, - epoch: int -) -> Dict[str, float]: - """ - Run all fleet detection algorithms and produce composite fleet scores. 
- - Returns: {miner_id: fleet_score} where 0.0=solo, 1.0=definite fleet - """ - ensure_schema(db) - - # Fetch signals for this epoch - rows = db.execute(""" - SELECT miner, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash - FROM fleet_signals - WHERE epoch = ? - """, (epoch,)).fetchall() - - if not rows or len(rows) < FLEET_DETECTION_MINIMUM: - # Not enough miners to detect fleets — everyone is solo - return {row[0]: 0.0 for row in rows} - - signals = [] - for row in rows: - signals.append({ - "miner": row[0], - "subnet_hash": row[1], - "attest_ts": row[2], - "clock_drift_cv": row[3], - "cache_latency_hash": row[4], - "thermal_signature": row[5], - "simd_bias_hash": row[6], - }) - - # Run detection algorithms - ip_scores = _detect_ip_clustering(signals) - timing_scores = _detect_timing_correlation(signals) - fingerprint_scores = _detect_fingerprint_similarity(signals) - - # Composite score: weighted average of signals - # IP clustering is strongest signal (hard to fake different subnets) - # Fingerprint similarity is second (hardware-level evidence) - # Timing correlation is supplementary (could be coincidental) - composite = {} - for sig in signals: - m = sig["miner"] - ip = ip_scores.get(m, 0.0) - timing = timing_scores.get(m, 0.0) - fp = fingerprint_scores.get(m, 0.0) - - # Weighted composite: IP 40%, fingerprint 40%, timing 20% - score = (ip * 0.4) + (fp * 0.4) + (timing * 0.2) - - # Corroboration boost: when two independent signals both fire above 0.3 - # it is far less likely to be coincidence, so we amplify by 30%. - # Capped at 1.0 to keep the score a unit probability. - fired = sum(1 for s in [ip, fp, timing] if s > 0.3) - if fired >= 2: - score = min(1.0, score * 1.3) - - composite[m] = round(score, 4) - - # Record to DB for audit trail - db.execute(""" - INSERT OR REPLACE INTO fleet_scores - (miner, epoch, fleet_score, ip_signal, timing_signal, - fingerprint_signal) - VALUES (?, ?, ?, ?, ?, ?) 
- """, (m, epoch, composite[m], ip, timing, fp)) - - db.commit() - return composite - - -# ═══════════════════════════════════════════════════════════ -# BUCKET NORMALIZATION -# ═══════════════════════════════════════════════════════════ - -def classify_miner_bucket(device_arch: str) -> str: - """Map a device architecture to its hardware bucket.""" - return ARCH_TO_BUCKET.get(device_arch.lower(), "modern") - - -def compute_bucket_pressure( - miners: List[Tuple[str, str, float]], - epoch: int, - db: Optional[sqlite3.Connection] = None -) -> Dict[str, float]: - """ - Compute pressure factors for each hardware bucket. - - If a bucket is overrepresented (more miners than its fair share), - its pressure factor drops below 1.0 — reducing rewards for that class. - Underrepresented buckets get boosted above 1.0. - - Args: - miners: List of (miner_id, device_arch, base_weight) tuples - epoch: Current epoch number - db: Optional DB connection for recording - - Returns: - {bucket_name: pressure_factor} - """ - # Count miners and total weight per bucket - bucket_counts = defaultdict(int) - bucket_weights = defaultdict(float) - bucket_miners = defaultdict(list) - - for miner_id, arch, weight in miners: - bucket = classify_miner_bucket(arch) - bucket_counts[bucket] += 1 - bucket_weights[bucket] += weight - bucket_miners[bucket].append(miner_id) - - active_buckets = [b for b in bucket_counts if bucket_counts[b] > 0] - num_active = len(active_buckets) - - if num_active == 0: - return {} - - # Ideal: equal miner count per bucket - total_miners = sum(bucket_counts.values()) - ideal_per_bucket = total_miners / num_active - - pressure = {} - for bucket in active_buckets: - count = bucket_counts[bucket] - ratio = count / ideal_per_bucket # >1 = overrepresented, <1 = rare - - if ratio > 1.0: - # Harmonic diminishing returns: 1/(1 + s*(r-1)) where s=PRESSURE_STRENGTH. - # At s=0.5: ratio 2→0.67, ratio 5→0.44. 
            # Floor at BUCKET_MIN_WEIGHT
            # to avoid completely zeroing out any single bucket.
            factor = 1.0 / (1.0 + BUCKET_PRESSURE_STRENGTH * (ratio - 1.0))
            factor = max(BUCKET_MIN_WEIGHT, factor)
        else:
            # Underrepresented bucket: linear boost up to 1.5x to incentivise
            # diversity without creating an extreme advantage for ultra-rare hardware.
            factor = 1.0 + (1.0 - ratio) * 0.5
            factor = min(1.5, factor)

        pressure[bucket] = round(factor, 4)

        # Record to DB — best-effort: a failed audit write must not block rewards.
        if db:
            try:
                db.execute("""
                    INSERT OR REPLACE INTO bucket_pressure
                    (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight)
                    VALUES (?, ?, ?, ?, ?, ?)
                """, (epoch, bucket, count, bucket_weights[bucket],
                      factor, bucket_weights[bucket] * factor))
            except Exception:
                pass  # Non-critical recording

    if db:
        try:
            db.commit()
        except Exception:
            pass  # Best-effort commit; pressure factors are still returned.

    return pressure


# ═══════════════════════════════════════════════════════════
# IMMUNE-ADJUSTED REWARD CALCULATION
# ═══════════════════════════════════════════════════════════

def apply_fleet_decay(
    base_multiplier: float,
    fleet_score: float
) -> float:
    """
    Apply fleet detection decay to a miner's base multiplier.

    fleet_score 0.0 → no decay (solo miner)
    fleet_score 1.0 → maximum decay (confirmed fleet)

    Formula: effective = base × (1.0 - fleet_score × DECAY_COEFF)
    Floor: Never below FLEET_SCORE_FLOOR × base

    Examples (base=2.5 G4):
        fleet_score=0.0 → 2.5 (solo miner, full bonus)
        fleet_score=0.3 → 2.2 (some fleet signals)
        fleet_score=0.7 → 1.8 (strong fleet signals)
        fleet_score=1.0 → 1.5 (confirmed fleet, 40% decay)
    """
    # Pure function: no clamping of inputs — callers pass fleet_score in [0, 1].
    decay = fleet_score * FLEET_DECAY_COEFF
    effective = base_multiplier * (1.0 - decay)
    # The floor keeps even a confirmed fleet member above 60% of its base.
    floor = base_multiplier * FLEET_SCORE_FLOOR
    return max(floor, effective)


def calculate_immune_rewards_equal_split(
    db: sqlite3.Connection,
    epoch: int,
    miners: List[Tuple[str, str]],
    chain_age_years: float,
    total_reward_urtc: int
) -> Dict[str, int]:
    """
    Calculate rewards using equal bucket split (RECOMMENDED mode).

    The pot is divided EQUALLY among active hardware buckets.
    Within each bucket, miners share their slice by time-aged weight.
    Fleet members get decayed multipliers WITHIN their bucket.
- - This is the nuclear option against fleet attacks: - - 500 modern boxes share 1/N of the pot (where N = active buckets) - - 1 solo G4 gets 1/N of the pot all to itself - - The fleet operator's $5M in hardware earns the same TOTAL as one G4 - - Args: - db: Database connection - epoch: Epoch being settled - miners: List of (miner_id, device_arch) tuples - chain_age_years: Chain age for time-aging - total_reward_urtc: Total uRTC to distribute - - Returns: - {miner_id: reward_urtc} - """ - from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier - - if not miners: - return {} - - # Step 1: Fleet detection - fleet_scores = compute_fleet_scores(db, epoch) - - # Step 2: Classify miners into buckets with fleet-decayed weights - buckets = defaultdict(list) # bucket → [(miner_id, decayed_weight)] - - for miner_id, arch in miners: - base = get_time_aged_multiplier(arch, chain_age_years) - fleet_score = fleet_scores.get(miner_id, 0.0) - effective = apply_fleet_decay(base, fleet_score) - bucket = classify_miner_bucket(arch) - buckets[bucket].append((miner_id, effective)) - - # Record - db.execute(""" - UPDATE fleet_scores SET effective_multiplier = ? - WHERE miner = ? AND epoch = ? - """, (effective, miner_id, epoch)) - - # Step 3: Split pot equally among active buckets - active_buckets = {b: members for b, members in buckets.items() if members} - num_buckets = len(active_buckets) - - if num_buckets == 0: - return {} - - # Integer division leaves rounding dust; we track it and assign it to the - # last bucket so no uRTC is ever lost from the epoch reward pool. 
- pot_per_bucket = total_reward_urtc // num_buckets - remainder = total_reward_urtc - (pot_per_bucket * num_buckets) - - # Step 4: Distribute within each bucket by weight - rewards = {} - bucket_index = 0 - - for bucket, members in active_buckets.items(): - # Last bucket gets remainder (rounding dust) - bucket_pot = pot_per_bucket + (remainder if bucket_index == num_buckets - 1 else 0) - - total_weight = sum(w for _, w in members) - if total_weight <= 0: - # Edge case: all weights zero (shouldn't happen) - per_miner = bucket_pot // len(members) - for miner_id, _ in members: - rewards[miner_id] = per_miner - else: - remaining = bucket_pot - for i, (miner_id, weight) in enumerate(members): - if i == len(members) - 1: - share = remaining - else: - share = int((weight / total_weight) * bucket_pot) - remaining -= share - rewards[miner_id] = share - - # Record bucket pressure data - try: - db.execute(""" - INSERT OR REPLACE INTO bucket_pressure - (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight) - VALUES (?, ?, ?, ?, ?, ?) - """, (epoch, bucket, len(members), total_weight, - 1.0 / num_buckets, bucket_pot / total_reward_urtc if total_reward_urtc > 0 else 0)) - except Exception: - pass - - bucket_index += 1 - - db.commit() - return rewards - - -def calculate_immune_weights( - db: sqlite3.Connection, - epoch: int, - miners: List[Tuple[str, str]], - chain_age_years: float, - total_reward_urtc: int = 0 -) -> Dict[str, float]: - """ - Calculate immune-system-adjusted weights for epoch reward distribution. - - Main entry point. Dispatches to equal_split or pressure mode based on config. - - When BUCKET_MODE = "equal_split" and total_reward_urtc is provided, - returns {miner_id: reward_urtc} (integer rewards, ready to credit). - - When BUCKET_MODE = "pressure", returns {miner_id: adjusted_weight} - (float weights for pro-rata distribution by caller). 
- - Args: - db: Database connection - epoch: Epoch being settled - miners: List of (miner_id, device_arch) tuples - chain_age_years: Chain age for time-aging calculation - total_reward_urtc: Total reward in uRTC (required for equal_split mode) - - Returns: - {miner_id: value} — either reward_urtc (int) or weight (float) - """ - if BUCKET_MODE == "equal_split" and total_reward_urtc > 0: - return calculate_immune_rewards_equal_split( - db, epoch, miners, chain_age_years, total_reward_urtc - ) - - # Fallback: pressure mode (original behavior) - from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier - - if not miners: - return {} - - # Step 1: Base time-aged multipliers - base_weights = [] - for miner_id, arch in miners: - base = get_time_aged_multiplier(arch, chain_age_years) - base_weights.append((miner_id, arch, base)) - - # Step 2: Fleet detection - fleet_scores = compute_fleet_scores(db, epoch) - - # Step 3: Apply fleet decay - decayed_weights = [] - for miner_id, arch, base in base_weights: - score = fleet_scores.get(miner_id, 0.0) - effective = apply_fleet_decay(base, score) - decayed_weights.append((miner_id, arch, effective)) - - db.execute(""" - UPDATE fleet_scores SET effective_multiplier = ? - WHERE miner = ? AND epoch = ? 
- """, (effective, miner_id, epoch)) - - # Step 4: Bucket pressure normalization - pressure = compute_bucket_pressure(decayed_weights, epoch, db) - - # Step 5: Apply pressure to get final weights - final_weights = {} - for miner_id, arch, weight in decayed_weights: - bucket = classify_miner_bucket(arch) - bucket_factor = pressure.get(bucket, 1.0) - final_weights[miner_id] = weight * bucket_factor - - db.commit() - return final_weights - - -# ═══════════════════════════════════════════════════════════ -# ADMIN / DIAGNOSTIC ENDPOINTS -# ═══════════════════════════════════════════════════════════ - -def get_fleet_report(db: sqlite3.Connection, epoch: int) -> dict: - """Generate a human-readable fleet detection report for an epoch.""" - ensure_schema(db) - - scores = db.execute(""" - SELECT miner, fleet_score, ip_signal, timing_signal, - fingerprint_signal, effective_multiplier - FROM fleet_scores WHERE epoch = ? - ORDER BY fleet_score DESC - """, (epoch,)).fetchall() - - pressure = db.execute(""" - SELECT bucket, miner_count, pressure_factor, raw_weight, adjusted_weight - FROM bucket_pressure WHERE epoch = ? 
- """, (epoch,)).fetchall() - - flagged = [s for s in scores if s[1] > 0.3] - - return { - "epoch": epoch, - "total_miners": len(scores), - "flagged_miners": len(flagged), - "fleet_scores": [ - { - "miner": s[0], - "fleet_score": s[1], - "signals": { - "ip_clustering": s[2], - "timing_correlation": s[3], - "fingerprint_similarity": s[4] - }, - "effective_multiplier": s[5] - } - for s in scores - ], - "bucket_pressure": [ - { - "bucket": p[0], - "miner_count": p[1], - "pressure_factor": p[2], - "raw_weight": p[3], - "adjusted_weight": p[4] - } - for p in pressure - ] - } - - -def register_fleet_endpoints(app, DB_PATH): - """Register Flask endpoints for fleet immune system admin.""" - from flask import request, jsonify - - @app.route('/admin/fleet/report', methods=['GET']) - def fleet_report(): - admin_key = request.headers.get("X-Admin-Key", "") - import os - if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): - return jsonify({"error": "Unauthorized"}), 401 - - epoch = request.args.get('epoch', type=int) - if epoch is None: - from rewards_implementation_rip200 import current_slot, slot_to_epoch - epoch = slot_to_epoch(current_slot()) - 1 - - with sqlite3.connect(DB_PATH) as db: - report = get_fleet_report(db, epoch) - return jsonify(report) - - @app.route('/admin/fleet/scores', methods=['GET']) - def fleet_scores(): - admin_key = request.headers.get("X-Admin-Key", "") - import os - if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): - return jsonify({"error": "Unauthorized"}), 401 - - miner = request.args.get('miner') - limit = request.args.get('limit', 10, type=int) - - with sqlite3.connect(DB_PATH) as db: - if miner: - rows = db.execute(""" - SELECT epoch, fleet_score, ip_signal, timing_signal, - fingerprint_signal, effective_multiplier - FROM fleet_scores WHERE miner = ? - ORDER BY epoch DESC LIMIT ? 
- """, (miner, limit)).fetchall() - else: - rows = db.execute(""" - SELECT miner, epoch, fleet_score, ip_signal, - timing_signal, fingerprint_signal - FROM fleet_scores - WHERE fleet_score > 0.3 - ORDER BY fleet_score DESC LIMIT ? - """, (limit,)).fetchall() - - return jsonify({"scores": [dict(zip( - ["miner", "epoch", "fleet_score", "ip_signal", - "timing_signal", "fingerprint_signal"], r - )) for r in rows]}) - - print("[RIP-201] Fleet immune system endpoints registered") - - -# ═══════════════════════════════════════════════════════════ -# SELF-TEST -# ═══════════════════════════════════════════════════════════ - -if __name__ == "__main__": - print("=" * 60) - print("RIP-201: Fleet Detection Immune System — Self Test") - print("=" * 60) - - # Create in-memory DB - db = sqlite3.connect(":memory:") - ensure_schema(db) - - # Also need miner_attest_recent for the full pipeline - db.execute(""" - CREATE TABLE IF NOT EXISTS miner_attest_recent ( - miner TEXT PRIMARY KEY, - ts_ok INTEGER NOT NULL, - device_family TEXT, - device_arch TEXT, - entropy_score REAL DEFAULT 0.0, - fingerprint_passed INTEGER DEFAULT 0 - ) - """) - - EPOCH = 100 - - # ─── Scenario 1: Healthy diverse network ─── - print("\n--- Scenario 1: Healthy Diverse Network (8 unique miners) ---") - - healthy_miners = [ - ("g4-powerbook-115", "g4", "10.1.1", 1000, 0.092, "cache_a", 0.45, "simd_a"), - ("dual-g4-125", "g4", "10.1.2", 1200, 0.088, "cache_b", 0.52, "simd_b"), - ("ppc-g5-130", "g5", "10.2.1", 1500, 0.105, "cache_c", 0.38, "simd_c"), - ("victus-x86", "modern", "192.168.0", 2000, 0.049, "cache_d", 0.61, "simd_d"), - ("sophia-nas", "modern", "192.168.1", 2300, 0.055, "cache_e", 0.58, "simd_e"), - ("mac-mini-m2", "apple_silicon", "10.3.1", 3000, 0.033, "cache_f", 0.42, "simd_f"), - ("power8-server", "power8", "10.4.1", 4000, 0.071, "cache_g", 0.55, "simd_g"), - ("ryan-factorio", "modern", "76.8.228", 5000, 0.044, "cache_h", 0.63, "simd_h"), - ] - - for m, arch, subnet, ts, cv, cache, thermal, simd 
in healthy_miners: - subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] - db.execute(""" - INSERT OR REPLACE INTO fleet_signals - (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) - """, (m, EPOCH, subnet_hash, ts, cv, cache, thermal, simd)) - - db.commit() - scores = compute_fleet_scores(db, EPOCH) - - print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") - print(f" {'─'*25} {'─'*12} {'─'*15}") - for m, arch, *_ in healthy_miners: - s = scores.get(m, 0.0) - status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" - print(f" {m:<25} {s:>12.4f} {status:<15}") - - # ─── Scenario 2: Fleet attack (10 modern boxes, same subnet) ─── - print("\n--- Scenario 2: Fleet Attack (10 modern boxes, same /24) ---") - - EPOCH2 = 101 - fleet_miners = [] - - # 3 legitimate miners - fleet_miners.append(("g4-real-1", "g4", "10.1.1", 1000, 0.092, "cache_real1", 0.45, "simd_real1")) - fleet_miners.append(("g5-real-1", "g5", "10.2.1", 1800, 0.105, "cache_real2", 0.38, "simd_real2")) - fleet_miners.append(("m2-real-1", "apple_silicon", "10.3.1", 2500, 0.033, "cache_real3", 0.42, "simd_real3")) - - # 10 fleet miners — same subnet, similar timing, similar fingerprints - for i in range(10): - fleet_miners.append(( - f"fleet-box-{i}", - "modern", - "203.0.113", # All same /24 subnet - 3000 + i * 5, # Attestation within 50s of each other - 0.048 + i * 0.001, # Nearly identical clock drift - "cache_fleet_shared", # SAME cache timing hash - 0.60 + i * 0.005, # Very similar thermal signatures - "simd_fleet_shared", # SAME SIMD hash - )) - - for m, arch, subnet, ts, cv, cache, thermal, simd in fleet_miners: - subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] - db.execute(""" - INSERT OR REPLACE INTO fleet_signals - (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, - cache_latency_hash, thermal_signature, simd_bias_hash) - VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
- """, (m, EPOCH2, subnet_hash, ts, cv, cache, thermal, simd)) - - db.commit() - scores2 = compute_fleet_scores(db, EPOCH2) - - print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") - print(f" {'─'*25} {'─'*12} {'─'*15}") - for m, arch, *_ in fleet_miners: - s = scores2.get(m, 0.0) - status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" - print(f" {m:<25} {s:>12.4f} {status:<15}") - - # ─── Scenario 3: Bucket pressure ─── - print("\n--- Scenario 3: Bucket Pressure (500 modern vs 3 vintage) ---") - - fleet_attack = [("g4-solo", "g4", 2.5), ("g5-solo", "g5", 2.0), ("g3-solo", "g3", 1.8)] - for i in range(500): - fleet_attack.append((f"modern-{i}", "modern", 1.0)) - - pressure = compute_bucket_pressure(fleet_attack, 200) - - print(f" {'Bucket':<20} {'Pressure':>10} {'Effect':<30}") - print(f" {'─'*20} {'─'*10} {'─'*30}") - for bucket, factor in sorted(pressure.items(), key=lambda x: x[1]): - if factor < 1.0: - effect = f"FLATTENED (each modern box worth {factor:.2f}x)" - elif factor > 1.0: - effect = f"BOOSTED (rare hardware bonus {factor:.2f}x)" - else: - effect = "neutral" - print(f" {bucket:<20} {factor:>10.4f} {effect:<30}") - - # ─── Scenario 4: Fleet decay on multipliers ─── - print("\n--- Scenario 4: Fleet Decay Examples ---") - - examples = [ - ("G4 (solo)", 2.5, 0.0), - ("G4 (mild fleet)", 2.5, 0.3), - ("G4 (strong fleet)", 2.5, 0.7), - ("G4 (confirmed fleet)", 2.5, 1.0), - ("Modern (solo)", 1.0, 0.0), - ("Modern (strong fleet)", 1.0, 0.7), - ("Modern (confirmed fleet)", 1.0, 1.0), - ] - - print(f" {'Miner Type':<25} {'Base':>6} {'Fleet':>7} {'Effective':>10} {'Decay':>8}") - print(f" {'─'*25} {'─'*6} {'─'*7} {'─'*10} {'─'*8}") - for name, base, score in examples: - eff = apply_fleet_decay(base, score) - decay_pct = (1.0 - eff/base) * 100 if base > 0 else 0 - print(f" {name:<25} {base:>6.2f} {score:>7.2f} {eff:>10.3f} {decay_pct:>7.1f}%") - - # ─── Combined effect ─── - print("\n--- Combined: 500 Modern Fleet vs 3 Vintage Solo ---") - 
print(" Without immune system:") - total_w_no_immune = 500 * 1.0 + 2.5 + 2.0 + 1.8 - g4_share = (2.5 / total_w_no_immune) * 1.5 - modern_total = (500 * 1.0 / total_w_no_immune) * 1.5 - modern_each = modern_total / 500 - print(f" G4 solo: {g4_share:.6f} RTC/epoch") - print(f" 500 modern fleet: {modern_total:.6f} RTC/epoch total ({modern_each:.8f} each)") - print(f" Fleet ROI: {modern_total/g4_share:.1f}x the G4 solo reward") - - print("\n With RIP-201 PRESSURE mode (soft):") - fleet_eff = apply_fleet_decay(1.0, 0.8) # ~0.68 - g4_eff = 2.5 # Solo, no decay - bucket_p_modern = compute_bucket_pressure( - [("g4", "g4", g4_eff), ("g5", "g5", 2.0), ("g3", "g3", 1.8)] + - [(f"m{i}", "modern", fleet_eff) for i in range(500)], - 999 - ) - modern_p = bucket_p_modern.get("modern", 1.0) - vintage_p = bucket_p_modern.get("vintage_powerpc", 1.0) - - g4_final = g4_eff * vintage_p - modern_final = fleet_eff * modern_p - total_w_immune = g4_final + 2.0 * vintage_p + 1.8 * vintage_p + 500 * modern_final - g4_share_immune = (g4_final / total_w_immune) * 1.5 - modern_total_immune = (500 * modern_final / total_w_immune) * 1.5 - modern_each_immune = modern_total_immune / 500 - - print(f" Fleet score: 0.80 → multiplier decay to {fleet_eff:.3f}") - print(f" Modern pressure: {modern_p:.4f} (bucket flattened)") - print(f" Vintage pressure: {vintage_p:.4f} (bucket boosted)") - print(f" G4 solo: {g4_share_immune:.6f} RTC/epoch") - print(f" 500 modern fleet: {modern_total_immune:.6f} RTC/epoch total ({modern_each_immune:.8f} each)") - print(f" Fleet ROI: {modern_total_immune/g4_share_immune:.1f}x the G4 solo reward") - - # ─── Equal Split mode (the real defense) ─── - print("\n With RIP-201 EQUAL SPLIT mode (RECOMMENDED):") - print(" Pot split: 1.5 RTC ÷ 2 active buckets = 0.75 RTC each") - - # In equal split: vintage_powerpc bucket gets 0.75 RTC, modern bucket gets 0.75 RTC - vintage_pot = 0.75 # RTC - modern_pot = 0.75 # RTC - - # Within vintage bucket: 3 miners split 0.75 by weight - 
vintage_total_w = 2.5 + 2.0 + 1.8 - g4_equal = (2.5 / vintage_total_w) * vintage_pot - g5_equal = (2.0 / vintage_total_w) * vintage_pot - g3_equal = (1.8 / vintage_total_w) * vintage_pot - - # Within modern bucket: 500 fleet miners split 0.75 by decayed weight - modern_each_equal = modern_pot / 500 # Equal weight within bucket (all modern) - - print(f" Vintage bucket (3 miners share 0.75 RTC):") - print(f" G4 solo: {g4_equal:.6f} RTC/epoch") - print(f" G5 solo: {g5_equal:.6f} RTC/epoch") - print(f" G3 solo: {g3_equal:.6f} RTC/epoch") - print(f" Modern bucket (500 fleet share 0.75 RTC):") - print(f" Each fleet box: {modern_each_equal:.8f} RTC/epoch") - print(f" Fleet ROI: {modern_pot/g4_equal:.1f}x the G4 solo reward (TOTAL fleet)") - print(f" Per-box ROI: {modern_each_equal/g4_equal:.4f}x (each fleet box vs G4)") - print(f" Fleet gets: {modern_pot/1.5*100:.0f}% of pot (was {modern_total/1.5*100:.0f}%)") - print(f" G4 earns: {g4_equal/g4_share:.0f}x more than without immune system") - - # ─── The economics ─── - print("\n === ECONOMIC IMPACT ===") - print(f" Without immune: 500 boxes earn {modern_total:.4f} RTC/epoch = {modern_total*365:.1f} RTC/year") - print(f" With equal split: 500 boxes earn {modern_pot:.4f} RTC/epoch = {modern_pot*365:.1f} RTC/year") - hardware_cost = 5_000_000 # $5M - rtc_value = 0.10 # $0.10/RTC - annual_no_immune = modern_total * 365 * rtc_value - annual_equal = modern_pot * 365 * rtc_value - years_to_roi_no = hardware_cost / annual_no_immune if annual_no_immune > 0 else float('inf') - years_to_roi_eq = hardware_cost / annual_equal if annual_equal > 0 else float('inf') - print(f" At $0.10/RTC, fleet annual revenue:") - print(f" No immune: ${annual_no_immune:,.2f}/year → ROI in {years_to_roi_no:,.0f} years") - print(f" Equal split: ${annual_equal:,.2f}/year → ROI in {years_to_roi_eq:,.0f} years") - print(f" A $5M hardware fleet NEVER pays for itself. 
Attack neutralized.") - - print("\n" + "=" * 60) - print("RIP-201 self-test complete.") - print("One of everything beats a hundred of one thing.") - print("=" * 60) +#!/usr/bin/env python3 +""" +RIP-201: Fleet Detection Immune System +======================================= + +Protects RustChain reward economics from fleet-scale attacks where a single +actor deploys many machines (real or emulated) to dominate the reward pool. + +Core Principles: + 1. Anti-homogeneity, not anti-modern — diversity IS the immune system + 2. Bucket normalization — rewards split by hardware CLASS, not per-CPU + 3. Fleet signal detection — IP clustering, timing correlation, fingerprint similarity + 4. Multiplier decay — suspected fleet members get diminishing returns + 5. Pressure feedback — overrepresented classes get flattened, rare ones get boosted + +Design Axiom: + "One of everything beats a hundred of one thing." + +Integration: + Called from calculate_epoch_rewards_time_aged() BEFORE distributing rewards. + Requires fleet_signals table populated by submit_attestation(). 
+ +Author: Scott Boudreaux / Elyan Labs +Date: 2026-02-28 +""" + +import hashlib +import math +import sqlite3 +import time +from collections import defaultdict +from typing import Dict, List, Optional, Tuple + +# ═══════════════════════════════════════════════════════════ +# CONFIGURATION +# ═══════════════════════════════════════════════════════════ + +# Hardware class buckets — rewards split equally across these +HARDWARE_BUCKETS = { + "vintage_powerpc": ["g3", "g4", "g5", "powerpc", "powerpc g3", "powerpc g4", + "powerpc g5", "powerpc g3 (750)", "powerpc g4 (74xx)", + "powerpc g5 (970)", "power macintosh"], + "vintage_x86": ["pentium", "pentium4", "retro", "core2", "core2duo", + "nehalem", "sandybridge"], + "apple_silicon": ["apple_silicon", "m1", "m2", "m3"], + "modern": ["modern", "x86_64"], + "exotic": ["power8", "power9", "sparc", "mips", "riscv", "s390x"], + "arm": ["aarch64", "arm", "armv7", "armv7l"], + "retro_console": ["nes_6502", "snes_65c816", "n64_mips", "gba_arm7", + "genesis_68000", "sms_z80", "saturn_sh2", + "gameboy_z80", "gameboy_color_z80", "ps1_mips", + "6502", "65c816", "z80", "sh2"], +} + +# Reverse lookup: arch → bucket name +ARCH_TO_BUCKET = {} +for bucket, archs in HARDWARE_BUCKETS.items(): + for arch in archs: + ARCH_TO_BUCKET[arch] = bucket + +# Fleet detection thresholds +FLEET_SUBNET_THRESHOLD = 3 # 3+ miners from same /24 = signal +FLEET_TIMING_WINDOW_S = 30 # Attestations within 30s = correlated +FLEET_TIMING_THRESHOLD = 0.6 # 60%+ of attestations correlated = signal +FLEET_FINGERPRINT_THRESHOLD = 0.85 # Cosine similarity > 0.85 = signal + +# Fleet score → multiplier decay +# fleet_score 0.0 = solo miner (no decay) +# fleet_score 1.0 = definite fleet (max decay) +FLEET_DECAY_COEFF = 0.4 # Max 40% reduction at fleet_score=1.0 +FLEET_SCORE_FLOOR = 0.6 # Never decay below 60% of base multiplier + +# Bucket normalization mode +# "equal_split" = hard split: each active bucket gets equal share of pot (RECOMMENDED) +# "pressure" = soft: 
overrepresented buckets get flattened multiplier +BUCKET_MODE = "equal_split" + +# Bucket pressure parameters (used when BUCKET_MODE = "pressure") +BUCKET_IDEAL_SHARE = None # Auto-calculated as 1/num_active_buckets +BUCKET_PRESSURE_STRENGTH = 0.5 # How aggressively to flatten overrepresented buckets +BUCKET_MIN_WEIGHT = 0.3 # Minimum bucket weight (even if massively overrepresented) + +# Minimum miners to trigger fleet detection (below this, everyone is solo) +FLEET_DETECTION_MINIMUM = 4 + + +# ═══════════════════════════════════════════════════════════ +# DATABASE SCHEMA +# ═══════════════════════════════════════════════════════════ + +SCHEMA_SQL = """ +-- Fleet signal tracking per attestation +CREATE TABLE IF NOT EXISTS fleet_signals ( + miner TEXT NOT NULL, + epoch INTEGER NOT NULL, + subnet_hash TEXT, -- HMAC of /24 subnet for privacy + attest_ts INTEGER NOT NULL, -- Exact attestation timestamp + clock_drift_cv REAL, -- Clock drift coefficient of variation + cache_latency_hash TEXT, -- Hash of cache timing profile + thermal_signature REAL, -- Thermal drift entropy value + simd_bias_hash TEXT, -- Hash of SIMD timing profile + PRIMARY KEY (miner, epoch) +); + +-- Fleet detection results per epoch +CREATE TABLE IF NOT EXISTS fleet_scores ( + miner TEXT NOT NULL, + epoch INTEGER NOT NULL, + fleet_score REAL NOT NULL DEFAULT 0.0, -- 0.0=solo, 1.0=definite fleet + ip_signal REAL DEFAULT 0.0, + timing_signal REAL DEFAULT 0.0, + fingerprint_signal REAL DEFAULT 0.0, + cluster_id TEXT, -- Fleet cluster identifier + effective_multiplier REAL, -- After decay + PRIMARY KEY (miner, epoch) +); + +-- Bucket pressure tracking per epoch +CREATE TABLE IF NOT EXISTS bucket_pressure ( + epoch INTEGER NOT NULL, + bucket TEXT NOT NULL, + miner_count INTEGER NOT NULL, + raw_weight REAL NOT NULL, + pressure_factor REAL NOT NULL, -- <1.0 = overrepresented, >1.0 = rare + adjusted_weight REAL NOT NULL, + PRIMARY KEY (epoch, bucket) +); + +-- Fleet cluster registry +CREATE TABLE IF NOT 
EXISTS fleet_clusters ( + cluster_id TEXT PRIMARY KEY, + first_seen_epoch INTEGER NOT NULL, + last_seen_epoch INTEGER NOT NULL, + member_count INTEGER NOT NULL, + detection_signals TEXT, -- JSON: which signals triggered + cumulative_score REAL DEFAULT 0.0 +); +""" + + +def ensure_schema(db: sqlite3.Connection): + """Create fleet immune system tables if they don't exist.""" + db.executescript(SCHEMA_SQL) + db.commit() + + +# ═══════════════════════════════════════════════════════════ +# SIGNAL COLLECTION (called from submit_attestation) +# ═══════════════════════════════════════════════════════════ + +def record_fleet_signals_from_request( + db: sqlite3.Connection, + miner: str, + epoch: int, + ip_address: str, + attest_ts: int, + fingerprint: Optional[dict] = None +): + """ + Record fleet detection signals from an attestation submission. + + Called from submit_attestation() after validation passes. + Stores privacy-preserving hashes of network and fingerprint data. + """ + ensure_schema(db) + + # Hash the /24 subnet rather than storing the raw IP so we can group miners + # by network without logging PII. The 16-char truncation is still collision- + # resistant enough for fleet detection while reducing storage footprint. 
+ if ip_address: + parts = ip_address.split('.') + if len(parts) == 4: + subnet = '.'.join(parts[:3]) + subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] + else: + subnet_hash = hashlib.sha256(ip_address.encode()).hexdigest()[:16] + else: + subnet_hash = None + + # Extract fingerprint signals + clock_drift_cv = None + cache_hash = None + thermal_sig = None + simd_hash = None + + if fingerprint and isinstance(fingerprint, dict): + checks = fingerprint.get("checks", {}) + + # Clock drift coefficient of variation + clock = checks.get("clock_drift", {}).get("data", {}) + clock_drift_cv = clock.get("cv") + + # Cache timing profile hash (privacy-preserving) + cache = checks.get("cache_timing", {}).get("data", {}) + if cache: + cache_str = str(sorted(cache.items())) + cache_hash = hashlib.sha256(cache_str.encode()).hexdigest()[:16] + + # Thermal drift entropy + thermal = checks.get("thermal_drift", {}).get("data", {}) + thermal_sig = thermal.get("entropy", thermal.get("drift_magnitude")) + + # SIMD bias profile hash + simd = checks.get("simd_identity", {}).get("data", {}) + if simd: + simd_str = str(sorted(simd.items())) + simd_hash = hashlib.sha256(simd_str.encode()).hexdigest()[:16] + + db.execute(""" + INSERT OR REPLACE INTO fleet_signals + (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_hash, thermal_sig, simd_hash)) + db.commit() + + +def record_fleet_signals(db_path_or_conn, miner: str, device: dict, + signals: dict, fingerprint: Optional[dict], + attest_ts: int, ip_address: str = None, + epoch: int = None): + """ + Convenience wrapper called from record_attestation_success(). + + Accepts either a DB path (str) or connection, and extracts + the IP from signals if not provided explicitly. 
+ """ + import time as _time + + if isinstance(db_path_or_conn, str): + db = sqlite3.connect(db_path_or_conn) + own = True + else: + db = db_path_or_conn + own = False + + try: + # Get epoch from current time if not provided + if epoch is None: + GENESIS = 1764706927 + BLOCK_TIME = 600 + slot = (int(_time.time()) - GENESIS) // BLOCK_TIME + epoch = slot // 144 + + # Extract IP from signals or request + if not ip_address: + ip_address = signals.get("ip", signals.get("remote_addr", "")) + + record_fleet_signals_from_request(db, miner, epoch, ip_address, + attest_ts, fingerprint) + except Exception as e: + print(f"[RIP-201] Fleet signal recording error: {e}") + finally: + if own: + db.close() + + +# ═══════════════════════════════════════════════════════════ +# FLEET DETECTION ENGINE +# ═══════════════════════════════════════════════════════════ + +def _detect_ip_clustering( + signals: List[dict] +) -> Dict[str, float]: + """ + Detect miners sharing the same /24 subnet. + + Returns: {miner_id: ip_signal} where ip_signal = 0.0-1.0 + """ + scores = {} + + # Group by subnet hash + subnet_groups = defaultdict(list) + for sig in signals: + if sig["subnet_hash"]: + subnet_groups[sig["subnet_hash"]].append(sig["miner"]) + + for subnet, miners in subnet_groups.items(): + count = len(miners) + if count >= FLEET_SUBNET_THRESHOLD: + # Sublinear signal growth (count/20 + 0.15) so a small legit datacenter + # (e.g., 3 boxes) doesn't get the same penalty as a 20-machine farm. + # We take the max so a miner in multiple overlapping clusters keeps + # the highest signal rather than summing them. 
+ signal = min(1.0, count / 20.0 + 0.15) + for m in miners: + scores[m] = max(scores.get(m, 0.0), signal) + + # Solo miners or small groups: 0.0 + for sig in signals: + if sig["miner"] not in scores: + scores[sig["miner"]] = 0.0 + + return scores + + +def _detect_timing_correlation( + signals: List[dict] +) -> Dict[str, float]: + """ + Detect miners whose attestation timestamps are suspiciously synchronized. + + Fleet operators often update all miners in rapid succession. + Real independent operators attest at random times throughout the day. + """ + scores = {} + if len(signals) < FLEET_DETECTION_MINIMUM: + return {s["miner"]: 0.0 for s in signals} + + timestamps = [(s["miner"], s["attest_ts"]) for s in signals] + timestamps.sort(key=lambda x: x[1]) + + # O(n²) comparison is intentional: fleet epochs typically have <100 miners, + # so quadratic cost is negligible and we avoid false negatives from binning. + for i, (miner_a, ts_a) in enumerate(timestamps): + correlated = 0 + total_others = len(timestamps) - 1 + for j, (miner_b, ts_b) in enumerate(timestamps): + if i == j: + continue + if abs(ts_a - ts_b) <= FLEET_TIMING_WINDOW_S: + correlated += 1 + + if total_others > 0: + ratio = correlated / total_others + if ratio >= FLEET_TIMING_THRESHOLD: + # High correlation → fleet signal + scores[miner_a] = min(1.0, ratio) + else: + scores[miner_a] = 0.0 + else: + scores[miner_a] = 0.0 + + return scores + + +def _detect_fingerprint_similarity( + signals: List[dict] +) -> Dict[str, float]: + """ + Detect miners with suspiciously similar hardware fingerprints. + + Identical cache timing profiles, SIMD bias, or thermal signatures + across different "machines" indicate shared hardware or VMs on same host. 
+ """ + scores = {} + if len(signals) < FLEET_DETECTION_MINIMUM: + return {s["miner"]: 0.0 for s in signals} + + # Build similarity groups from hash matches + # Miners sharing 2+ fingerprint hashes are likely same hardware + for i, sig_a in enumerate(signals): + matches = 0 + match_count = 0 + + for j, sig_b in enumerate(signals): + if i == j: + continue + + shared_hashes = 0 + total_hashes = 0 + + # Compare cache timing hash + if sig_a.get("cache_latency_hash") and sig_b.get("cache_latency_hash"): + total_hashes += 1 + if sig_a["cache_latency_hash"] == sig_b["cache_latency_hash"]: + shared_hashes += 1 + + # Compare SIMD bias hash + if sig_a.get("simd_bias_hash") and sig_b.get("simd_bias_hash"): + total_hashes += 1 + if sig_a["simd_bias_hash"] == sig_b["simd_bias_hash"]: + shared_hashes += 1 + + # Compare clock drift CV (within 5% = suspiciously similar) + if sig_a.get("clock_drift_cv") and sig_b.get("clock_drift_cv"): + total_hashes += 1 + cv_a, cv_b = sig_a["clock_drift_cv"], sig_b["clock_drift_cv"] + if cv_b > 0 and abs(cv_a - cv_b) / cv_b < 0.05: + shared_hashes += 1 + + # Compare thermal signature (within 10%) + if sig_a.get("thermal_signature") and sig_b.get("thermal_signature"): + total_hashes += 1 + th_a, th_b = sig_a["thermal_signature"], sig_b["thermal_signature"] + if th_b > 0 and abs(th_a - th_b) / th_b < 0.10: + shared_hashes += 1 + + # Require 2+ matching hashes to avoid false positives from a single + # shared data-centre NTP server inflating clock_drift_cv similarity. + if total_hashes >= 2 and shared_hashes >= 2: + matches += 1 + + # Signal scales with match count: 1→0.35, 2→0.50, 5→0.95, 6+→1.0 + if matches > 0: + scores[sig_a["miner"]] = min(1.0, 0.2 + matches * 0.15) + else: + scores[sig_a["miner"]] = 0.0 + + return scores + + +def compute_fleet_scores( + db: sqlite3.Connection, + epoch: int +) -> Dict[str, float]: + """ + Run all fleet detection algorithms and produce composite fleet scores. 
+ + Returns: {miner_id: fleet_score} where 0.0=solo, 1.0=definite fleet + """ + ensure_schema(db) + + # Fetch signals for this epoch + rows = db.execute(""" + SELECT miner, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash + FROM fleet_signals + WHERE epoch = ? + """, (epoch,)).fetchall() + + if not rows or len(rows) < FLEET_DETECTION_MINIMUM: + # Not enough miners to detect fleets — everyone is solo + return {row[0]: 0.0 for row in rows} + + signals = [] + for row in rows: + signals.append({ + "miner": row[0], + "subnet_hash": row[1], + "attest_ts": row[2], + "clock_drift_cv": row[3], + "cache_latency_hash": row[4], + "thermal_signature": row[5], + "simd_bias_hash": row[6], + }) + + # Run detection algorithms + ip_scores = _detect_ip_clustering(signals) + timing_scores = _detect_timing_correlation(signals) + fingerprint_scores = _detect_fingerprint_similarity(signals) + + # Composite score: weighted average of signals + # IP clustering is strongest signal (hard to fake different subnets) + # Fingerprint similarity is second (hardware-level evidence) + # Timing correlation is supplementary (could be coincidental) + composite = {} + for sig in signals: + m = sig["miner"] + ip = ip_scores.get(m, 0.0) + timing = timing_scores.get(m, 0.0) + fp = fingerprint_scores.get(m, 0.0) + + # Weighted composite: IP 40%, fingerprint 40%, timing 20% + score = (ip * 0.4) + (fp * 0.4) + (timing * 0.2) + + # Corroboration boost: when two independent signals both fire above 0.3 + # it is far less likely to be coincidence, so we amplify by 30%. + # Capped at 1.0 to keep the score a unit probability. + fired = sum(1 for s in [ip, fp, timing] if s > 0.3) + if fired >= 2: + score = min(1.0, score * 1.3) + + composite[m] = round(score, 4) + + # Record to DB for audit trail + db.execute(""" + INSERT OR REPLACE INTO fleet_scores + (miner, epoch, fleet_score, ip_signal, timing_signal, + fingerprint_signal) + VALUES (?, ?, ?, ?, ?, ?) 
+ """, (m, epoch, composite[m], ip, timing, fp)) + + db.commit() + return composite + + +# ═══════════════════════════════════════════════════════════ +# BUCKET NORMALIZATION +# ═══════════════════════════════════════════════════════════ + +def classify_miner_bucket(device_arch: str) -> str: + """Map a device architecture to its hardware bucket.""" + return ARCH_TO_BUCKET.get(device_arch.lower(), "modern") + + +def compute_bucket_pressure( + miners: List[Tuple[str, str, float]], + epoch: int, + db: Optional[sqlite3.Connection] = None +) -> Dict[str, float]: + """ + Compute pressure factors for each hardware bucket. + + If a bucket is overrepresented (more miners than its fair share), + its pressure factor drops below 1.0 — reducing rewards for that class. + Underrepresented buckets get boosted above 1.0. + + Args: + miners: List of (miner_id, device_arch, base_weight) tuples + epoch: Current epoch number + db: Optional DB connection for recording + + Returns: + {bucket_name: pressure_factor} + """ + # Count miners and total weight per bucket + bucket_counts = defaultdict(int) + bucket_weights = defaultdict(float) + bucket_miners = defaultdict(list) + + for miner_id, arch, weight in miners: + bucket = classify_miner_bucket(arch) + bucket_counts[bucket] += 1 + bucket_weights[bucket] += weight + bucket_miners[bucket].append(miner_id) + + active_buckets = [b for b in bucket_counts if bucket_counts[b] > 0] + num_active = len(active_buckets) + + if num_active == 0: + return {} + + # Ideal: equal miner count per bucket + total_miners = sum(bucket_counts.values()) + ideal_per_bucket = total_miners / num_active + + pressure = {} + for bucket in active_buckets: + count = bucket_counts[bucket] + ratio = count / ideal_per_bucket # >1 = overrepresented, <1 = rare + + if ratio > 1.0: + # Harmonic diminishing returns: 1/(1 + s*(r-1)) where s=PRESSURE_STRENGTH. + # At s=0.5: ratio 2→0.67, ratio 5→0.44. 
Floor at BUCKET_MIN_WEIGHT + # to avoid completely zeroing out any single bucket. + factor = 1.0 / (1.0 + BUCKET_PRESSURE_STRENGTH * (ratio - 1.0)) + factor = max(BUCKET_MIN_WEIGHT, factor) + else: + # Underrepresented bucket: linear boost up to 1.5x to incentivise + # diversity without creating an extreme advantage for ultra-rare hardware. + factor = 1.0 + (1.0 - ratio) * 0.5 + factor = min(1.5, factor) + + pressure[bucket] = round(factor, 4) + + # Record to DB + if db: + try: + db.execute(""" + INSERT OR REPLACE INTO bucket_pressure + (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight) + VALUES (?, ?, ?, ?, ?, ?) + """, (epoch, bucket, count, bucket_weights[bucket], + factor, bucket_weights[bucket] * factor)) + except Exception: + pass # Non-critical recording + + if db: + try: + db.commit() + except Exception: + pass + + return pressure + + +# ═══════════════════════════════════════════════════════════ +# IMMUNE-ADJUSTED REWARD CALCULATION +# ═══════════════════════════════════════════════════════════ + +def apply_fleet_decay( + base_multiplier: float, + fleet_score: float +) -> float: + """ + Apply fleet detection decay to a miner's base multiplier. 
+ + fleet_score 0.0 → no decay (solo miner) + fleet_score 1.0 → maximum decay (confirmed fleet) + + Formula: effective = base × (1.0 - fleet_score × DECAY_COEFF) + Floor: Never below FLEET_SCORE_FLOOR × base + + Examples (base=2.5 G4): + fleet_score=0.0 → 2.5 (solo miner, full bonus) + fleet_score=0.3 → 2.2 (some fleet signals) + fleet_score=0.7 → 1.8 (strong fleet signals) + fleet_score=1.0 → 1.5 (confirmed fleet, 40% decay) + """ + decay = fleet_score * FLEET_DECAY_COEFF + effective = base_multiplier * (1.0 - decay) + floor = base_multiplier * FLEET_SCORE_FLOOR + return max(floor, effective) + + +def calculate_immune_rewards_equal_split( + db: sqlite3.Connection, + epoch: int, + miners: List[Tuple[str, str]], + chain_age_years: float, + total_reward_urtc: int +) -> Dict[str, int]: + """ + Calculate rewards using equal bucket split (RECOMMENDED mode). + + The pot is divided EQUALLY among active hardware buckets. + Within each bucket, miners share their slice by time-aged weight. + Fleet members get decayed multipliers WITHIN their bucket. 
+ + This is the nuclear option against fleet attacks: + - 500 modern boxes share 1/N of the pot (where N = active buckets) + - 1 solo G4 gets 1/N of the pot all to itself + - The fleet operator's $5M in hardware earns the same TOTAL as one G4 + + Args: + db: Database connection + epoch: Epoch being settled + miners: List of (miner_id, device_arch) tuples + chain_age_years: Chain age for time-aging + total_reward_urtc: Total uRTC to distribute + + Returns: + {miner_id: reward_urtc} + """ + from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier + + if not miners: + return {} + + # Step 1: Fleet detection + fleet_scores = compute_fleet_scores(db, epoch) + + # Step 2: Classify miners into buckets with fleet-decayed weights + buckets = defaultdict(list) # bucket → [(miner_id, decayed_weight)] + + for miner_id, arch in miners: + base = get_time_aged_multiplier(arch, chain_age_years) + fleet_score = fleet_scores.get(miner_id, 0.0) + effective = apply_fleet_decay(base, fleet_score) + bucket = classify_miner_bucket(arch) + buckets[bucket].append((miner_id, effective)) + + # Record + db.execute(""" + UPDATE fleet_scores SET effective_multiplier = ? + WHERE miner = ? AND epoch = ? + """, (effective, miner_id, epoch)) + + # Step 3: Split pot equally among active buckets + active_buckets = {b: members for b, members in buckets.items() if members} + num_buckets = len(active_buckets) + + if num_buckets == 0: + return {} + + # Integer division leaves rounding dust; we track it and assign it to the + # last bucket so no uRTC is ever lost from the epoch reward pool. 
+ pot_per_bucket = total_reward_urtc // num_buckets + remainder = total_reward_urtc - (pot_per_bucket * num_buckets) + + # Step 4: Distribute within each bucket by weight + rewards = {} + bucket_index = 0 + + for bucket, members in active_buckets.items(): + # Last bucket gets remainder (rounding dust) + bucket_pot = pot_per_bucket + (remainder if bucket_index == num_buckets - 1 else 0) + + total_weight = sum(w for _, w in members) + if total_weight <= 0: + # Edge case: all weights zero (shouldn't happen) + per_miner = bucket_pot // len(members) + for miner_id, _ in members: + rewards[miner_id] = per_miner + else: + remaining = bucket_pot + for i, (miner_id, weight) in enumerate(members): + if i == len(members) - 1: + share = remaining + else: + share = int((weight / total_weight) * bucket_pot) + remaining -= share + rewards[miner_id] = share + + # Record bucket pressure data + try: + db.execute(""" + INSERT OR REPLACE INTO bucket_pressure + (epoch, bucket, miner_count, raw_weight, pressure_factor, adjusted_weight) + VALUES (?, ?, ?, ?, ?, ?) + """, (epoch, bucket, len(members), total_weight, + 1.0 / num_buckets, bucket_pot / total_reward_urtc if total_reward_urtc > 0 else 0)) + except Exception: + pass + + bucket_index += 1 + + db.commit() + return rewards + + +def calculate_immune_weights( + db: sqlite3.Connection, + epoch: int, + miners: List[Tuple[str, str]], + chain_age_years: float, + total_reward_urtc: int = 0 +) -> Dict[str, float]: + """ + Calculate immune-system-adjusted weights for epoch reward distribution. + + Main entry point. Dispatches to equal_split or pressure mode based on config. + + When BUCKET_MODE = "equal_split" and total_reward_urtc is provided, + returns {miner_id: reward_urtc} (integer rewards, ready to credit). + + When BUCKET_MODE = "pressure", returns {miner_id: adjusted_weight} + (float weights for pro-rata distribution by caller). 
+ + Args: + db: Database connection + epoch: Epoch being settled + miners: List of (miner_id, device_arch) tuples + chain_age_years: Chain age for time-aging calculation + total_reward_urtc: Total reward in uRTC (required for equal_split mode) + + Returns: + {miner_id: value} — either reward_urtc (int) or weight (float) + """ + if BUCKET_MODE == "equal_split" and total_reward_urtc > 0: + return calculate_immune_rewards_equal_split( + db, epoch, miners, chain_age_years, total_reward_urtc + ) + + # Fallback: pressure mode (original behavior) + from rip_200_round_robin_1cpu1vote import get_time_aged_multiplier + + if not miners: + return {} + + # Step 1: Base time-aged multipliers + base_weights = [] + for miner_id, arch in miners: + base = get_time_aged_multiplier(arch, chain_age_years) + base_weights.append((miner_id, arch, base)) + + # Step 2: Fleet detection + fleet_scores = compute_fleet_scores(db, epoch) + + # Step 3: Apply fleet decay + decayed_weights = [] + for miner_id, arch, base in base_weights: + score = fleet_scores.get(miner_id, 0.0) + effective = apply_fleet_decay(base, score) + decayed_weights.append((miner_id, arch, effective)) + + db.execute(""" + UPDATE fleet_scores SET effective_multiplier = ? + WHERE miner = ? AND epoch = ? 
+ """, (effective, miner_id, epoch)) + + # Step 4: Bucket pressure normalization + pressure = compute_bucket_pressure(decayed_weights, epoch, db) + + # Step 5: Apply pressure to get final weights + final_weights = {} + for miner_id, arch, weight in decayed_weights: + bucket = classify_miner_bucket(arch) + bucket_factor = pressure.get(bucket, 1.0) + final_weights[miner_id] = weight * bucket_factor + + db.commit() + return final_weights + + +# ═══════════════════════════════════════════════════════════ +# ADMIN / DIAGNOSTIC ENDPOINTS +# ═══════════════════════════════════════════════════════════ + +def get_fleet_report(db: sqlite3.Connection, epoch: int) -> dict: + """Generate a human-readable fleet detection report for an epoch.""" + ensure_schema(db) + + scores = db.execute(""" + SELECT miner, fleet_score, ip_signal, timing_signal, + fingerprint_signal, effective_multiplier + FROM fleet_scores WHERE epoch = ? + ORDER BY fleet_score DESC + """, (epoch,)).fetchall() + + pressure = db.execute(""" + SELECT bucket, miner_count, pressure_factor, raw_weight, adjusted_weight + FROM bucket_pressure WHERE epoch = ? 
+ """, (epoch,)).fetchall() + + flagged = [s for s in scores if s[1] > 0.3] + + return { + "epoch": epoch, + "total_miners": len(scores), + "flagged_miners": len(flagged), + "fleet_scores": [ + { + "miner": s[0], + "fleet_score": s[1], + "signals": { + "ip_clustering": s[2], + "timing_correlation": s[3], + "fingerprint_similarity": s[4] + }, + "effective_multiplier": s[5] + } + for s in scores + ], + "bucket_pressure": [ + { + "bucket": p[0], + "miner_count": p[1], + "pressure_factor": p[2], + "raw_weight": p[3], + "adjusted_weight": p[4] + } + for p in pressure + ] + } + + +def register_fleet_endpoints(app, DB_PATH): + """Register Flask endpoints for fleet immune system admin.""" + from flask import request, jsonify + + @app.route('/admin/fleet/report', methods=['GET']) + def fleet_report(): + admin_key = request.headers.get("X-Admin-Key", "") + import os + if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): + return jsonify({"error": "Unauthorized"}), 401 + + epoch = request.args.get('epoch', type=int) + if epoch is None: + from rewards_implementation_rip200 import current_slot, slot_to_epoch + epoch = slot_to_epoch(current_slot()) - 1 + + with sqlite3.connect(DB_PATH) as db: + report = get_fleet_report(db, epoch) + return jsonify(report) + + @app.route('/admin/fleet/scores', methods=['GET']) + def fleet_scores(): + admin_key = request.headers.get("X-Admin-Key", "") + import os + if admin_key != os.environ.get("RC_ADMIN_KEY", "rustchain_admin_key_2025_secure64"): + return jsonify({"error": "Unauthorized"}), 401 + + miner = request.args.get('miner') + limit = request.args.get('limit', 10, type=int) + + with sqlite3.connect(DB_PATH) as db: + if miner: + rows = db.execute(""" + SELECT epoch, fleet_score, ip_signal, timing_signal, + fingerprint_signal, effective_multiplier + FROM fleet_scores WHERE miner = ? + ORDER BY epoch DESC LIMIT ? 
+ """, (miner, limit)).fetchall() + else: + rows = db.execute(""" + SELECT miner, epoch, fleet_score, ip_signal, + timing_signal, fingerprint_signal + FROM fleet_scores + WHERE fleet_score > 0.3 + ORDER BY fleet_score DESC LIMIT ? + """, (limit,)).fetchall() + + return jsonify({"scores": [dict(zip( + ["miner", "epoch", "fleet_score", "ip_signal", + "timing_signal", "fingerprint_signal"], r + )) for r in rows]}) + + print("[RIP-201] Fleet immune system endpoints registered") + + +# ═══════════════════════════════════════════════════════════ +# SELF-TEST +# ═══════════════════════════════════════════════════════════ + +if __name__ == "__main__": + print("=" * 60) + print("RIP-201: Fleet Detection Immune System — Self Test") + print("=" * 60) + + # Create in-memory DB + db = sqlite3.connect(":memory:") + ensure_schema(db) + + # Also need miner_attest_recent for the full pipeline + db.execute(""" + CREATE TABLE IF NOT EXISTS miner_attest_recent ( + miner TEXT PRIMARY KEY, + ts_ok INTEGER NOT NULL, + device_family TEXT, + device_arch TEXT, + entropy_score REAL DEFAULT 0.0, + fingerprint_passed INTEGER DEFAULT 0 + ) + """) + + EPOCH = 100 + + # ─── Scenario 1: Healthy diverse network ─── + print("\n--- Scenario 1: Healthy Diverse Network (8 unique miners) ---") + + healthy_miners = [ + ("g4-powerbook-115", "g4", "10.1.1", 1000, 0.092, "cache_a", 0.45, "simd_a"), + ("dual-g4-125", "g4", "10.1.2", 1200, 0.088, "cache_b", 0.52, "simd_b"), + ("ppc-g5-130", "g5", "10.2.1", 1500, 0.105, "cache_c", 0.38, "simd_c"), + ("victus-x86", "modern", "192.168.0", 2000, 0.049, "cache_d", 0.61, "simd_d"), + ("sophia-nas", "modern", "192.168.1", 2300, 0.055, "cache_e", 0.58, "simd_e"), + ("mac-mini-m2", "apple_silicon", "10.3.1", 3000, 0.033, "cache_f", 0.42, "simd_f"), + ("power8-server", "power8", "10.4.1", 4000, 0.071, "cache_g", 0.55, "simd_g"), + ("ryan-factorio", "modern", "76.8.228", 5000, 0.044, "cache_h", 0.63, "simd_h"), + ] + + for m, arch, subnet, ts, cv, cache, thermal, simd 
in healthy_miners: + subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] + db.execute(""" + INSERT OR REPLACE INTO fleet_signals + (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) + """, (m, EPOCH, subnet_hash, ts, cv, cache, thermal, simd)) + + db.commit() + scores = compute_fleet_scores(db, EPOCH) + + print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") + print(f" {'─'*25} {'─'*12} {'─'*15}") + for m, arch, *_ in healthy_miners: + s = scores.get(m, 0.0) + status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" + print(f" {m:<25} {s:>12.4f} {status:<15}") + + # ─── Scenario 2: Fleet attack (10 modern boxes, same subnet) ─── + print("\n--- Scenario 2: Fleet Attack (10 modern boxes, same /24) ---") + + EPOCH2 = 101 + fleet_miners = [] + + # 3 legitimate miners + fleet_miners.append(("g4-real-1", "g4", "10.1.1", 1000, 0.092, "cache_real1", 0.45, "simd_real1")) + fleet_miners.append(("g5-real-1", "g5", "10.2.1", 1800, 0.105, "cache_real2", 0.38, "simd_real2")) + fleet_miners.append(("m2-real-1", "apple_silicon", "10.3.1", 2500, 0.033, "cache_real3", 0.42, "simd_real3")) + + # 10 fleet miners — same subnet, similar timing, similar fingerprints + for i in range(10): + fleet_miners.append(( + f"fleet-box-{i}", + "modern", + "203.0.113", # All same /24 subnet + 3000 + i * 5, # Attestation within 50s of each other + 0.048 + i * 0.001, # Nearly identical clock drift + "cache_fleet_shared", # SAME cache timing hash + 0.60 + i * 0.005, # Very similar thermal signatures + "simd_fleet_shared", # SAME SIMD hash + )) + + for m, arch, subnet, ts, cv, cache, thermal, simd in fleet_miners: + subnet_hash = hashlib.sha256(subnet.encode()).hexdigest()[:16] + db.execute(""" + INSERT OR REPLACE INTO fleet_signals + (miner, epoch, subnet_hash, attest_ts, clock_drift_cv, + cache_latency_hash, thermal_signature, simd_bias_hash) + VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
+ """, (m, EPOCH2, subnet_hash, ts, cv, cache, thermal, simd)) + + db.commit() + scores2 = compute_fleet_scores(db, EPOCH2) + + print(f" {'Miner':<25} {'Fleet Score':>12} {'Status':<15}") + print(f" {'─'*25} {'─'*12} {'─'*15}") + for m, arch, *_ in fleet_miners: + s = scores2.get(m, 0.0) + status = "CLEAN" if s < 0.3 else "FLAGGED" if s < 0.7 else "FLEET" + print(f" {m:<25} {s:>12.4f} {status:<15}") + + # ─── Scenario 3: Bucket pressure ─── + print("\n--- Scenario 3: Bucket Pressure (500 modern vs 3 vintage) ---") + + fleet_attack = [("g4-solo", "g4", 2.5), ("g5-solo", "g5", 2.0), ("g3-solo", "g3", 1.8)] + for i in range(500): + fleet_attack.append((f"modern-{i}", "modern", 1.0)) + + pressure = compute_bucket_pressure(fleet_attack, 200) + + print(f" {'Bucket':<20} {'Pressure':>10} {'Effect':<30}") + print(f" {'─'*20} {'─'*10} {'─'*30}") + for bucket, factor in sorted(pressure.items(), key=lambda x: x[1]): + if factor < 1.0: + effect = f"FLATTENED (each modern box worth {factor:.2f}x)" + elif factor > 1.0: + effect = f"BOOSTED (rare hardware bonus {factor:.2f}x)" + else: + effect = "neutral" + print(f" {bucket:<20} {factor:>10.4f} {effect:<30}") + + # ─── Scenario 4: Fleet decay on multipliers ─── + print("\n--- Scenario 4: Fleet Decay Examples ---") + + examples = [ + ("G4 (solo)", 2.5, 0.0), + ("G4 (mild fleet)", 2.5, 0.3), + ("G4 (strong fleet)", 2.5, 0.7), + ("G4 (confirmed fleet)", 2.5, 1.0), + ("Modern (solo)", 1.0, 0.0), + ("Modern (strong fleet)", 1.0, 0.7), + ("Modern (confirmed fleet)", 1.0, 1.0), + ] + + print(f" {'Miner Type':<25} {'Base':>6} {'Fleet':>7} {'Effective':>10} {'Decay':>8}") + print(f" {'─'*25} {'─'*6} {'─'*7} {'─'*10} {'─'*8}") + for name, base, score in examples: + eff = apply_fleet_decay(base, score) + decay_pct = (1.0 - eff/base) * 100 if base > 0 else 0 + print(f" {name:<25} {base:>6.2f} {score:>7.2f} {eff:>10.3f} {decay_pct:>7.1f}%") + + # ─── Combined effect ─── + print("\n--- Combined: 500 Modern Fleet vs 3 Vintage Solo ---") + 
print(" Without immune system:") + total_w_no_immune = 500 * 1.0 + 2.5 + 2.0 + 1.8 + g4_share = (2.5 / total_w_no_immune) * 1.5 + modern_total = (500 * 1.0 / total_w_no_immune) * 1.5 + modern_each = modern_total / 500 + print(f" G4 solo: {g4_share:.6f} RTC/epoch") + print(f" 500 modern fleet: {modern_total:.6f} RTC/epoch total ({modern_each:.8f} each)") + print(f" Fleet ROI: {modern_total/g4_share:.1f}x the G4 solo reward") + + print("\n With RIP-201 PRESSURE mode (soft):") + fleet_eff = apply_fleet_decay(1.0, 0.8) # ~0.68 + g4_eff = 2.5 # Solo, no decay + bucket_p_modern = compute_bucket_pressure( + [("g4", "g4", g4_eff), ("g5", "g5", 2.0), ("g3", "g3", 1.8)] + + [(f"m{i}", "modern", fleet_eff) for i in range(500)], + 999 + ) + modern_p = bucket_p_modern.get("modern", 1.0) + vintage_p = bucket_p_modern.get("vintage_powerpc", 1.0) + + g4_final = g4_eff * vintage_p + modern_final = fleet_eff * modern_p + total_w_immune = g4_final + 2.0 * vintage_p + 1.8 * vintage_p + 500 * modern_final + g4_share_immune = (g4_final / total_w_immune) * 1.5 + modern_total_immune = (500 * modern_final / total_w_immune) * 1.5 + modern_each_immune = modern_total_immune / 500 + + print(f" Fleet score: 0.80 → multiplier decay to {fleet_eff:.3f}") + print(f" Modern pressure: {modern_p:.4f} (bucket flattened)") + print(f" Vintage pressure: {vintage_p:.4f} (bucket boosted)") + print(f" G4 solo: {g4_share_immune:.6f} RTC/epoch") + print(f" 500 modern fleet: {modern_total_immune:.6f} RTC/epoch total ({modern_each_immune:.8f} each)") + print(f" Fleet ROI: {modern_total_immune/g4_share_immune:.1f}x the G4 solo reward") + + # ─── Equal Split mode (the real defense) ─── + print("\n With RIP-201 EQUAL SPLIT mode (RECOMMENDED):") + print(" Pot split: 1.5 RTC ÷ 2 active buckets = 0.75 RTC each") + + # In equal split: vintage_powerpc bucket gets 0.75 RTC, modern bucket gets 0.75 RTC + vintage_pot = 0.75 # RTC + modern_pot = 0.75 # RTC + + # Within vintage bucket: 3 miners split 0.75 by weight + 
vintage_total_w = 2.5 + 2.0 + 1.8 + g4_equal = (2.5 / vintage_total_w) * vintage_pot + g5_equal = (2.0 / vintage_total_w) * vintage_pot + g3_equal = (1.8 / vintage_total_w) * vintage_pot + + # Within modern bucket: 500 fleet miners split 0.75 by decayed weight + modern_each_equal = modern_pot / 500 # Equal weight within bucket (all modern) + + print(f" Vintage bucket (3 miners share 0.75 RTC):") + print(f" G4 solo: {g4_equal:.6f} RTC/epoch") + print(f" G5 solo: {g5_equal:.6f} RTC/epoch") + print(f" G3 solo: {g3_equal:.6f} RTC/epoch") + print(f" Modern bucket (500 fleet share 0.75 RTC):") + print(f" Each fleet box: {modern_each_equal:.8f} RTC/epoch") + print(f" Fleet ROI: {modern_pot/g4_equal:.1f}x the G4 solo reward (TOTAL fleet)") + print(f" Per-box ROI: {modern_each_equal/g4_equal:.4f}x (each fleet box vs G4)") + print(f" Fleet gets: {modern_pot/1.5*100:.0f}% of pot (was {modern_total/1.5*100:.0f}%)") + print(f" G4 earns: {g4_equal/g4_share:.0f}x more than without immune system") + + # ─── The economics ─── + print("\n === ECONOMIC IMPACT ===") + print(f" Without immune: 500 boxes earn {modern_total:.4f} RTC/epoch = {modern_total*365:.1f} RTC/year") + print(f" With equal split: 500 boxes earn {modern_pot:.4f} RTC/epoch = {modern_pot*365:.1f} RTC/year") + hardware_cost = 5_000_000 # $5M + rtc_value = 0.10 # $0.10/RTC + annual_no_immune = modern_total * 365 * rtc_value + annual_equal = modern_pot * 365 * rtc_value + years_to_roi_no = hardware_cost / annual_no_immune if annual_no_immune > 0 else float('inf') + years_to_roi_eq = hardware_cost / annual_equal if annual_equal > 0 else float('inf') + print(f" At $0.10/RTC, fleet annual revenue:") + print(f" No immune: ${annual_no_immune:,.2f}/year → ROI in {years_to_roi_no:,.0f} years") + print(f" Equal split: ${annual_equal:,.2f}/year → ROI in {years_to_roi_eq:,.0f} years") + print(f" A $5M hardware fleet NEVER pays for itself. 
Attack neutralized.") + + print("\n" + "=" * 60) + print("RIP-201 self-test complete.") + print("One of everything beats a hundred of one thing.") + print("=" * 60) diff --git a/rips/python/rustchain/proof_of_antiquity.py b/rips/python/rustchain/proof_of_antiquity.py index 00ab4d58..d9b9e2ab 100644 --- a/rips/python/rustchain/proof_of_antiquity.py +++ b/rips/python/rustchain/proof_of_antiquity.py @@ -1,457 +1,457 @@ -""" -RustChain Proof of Antiquity Consensus (RIP-0001) -================================================= - -Proof of Antiquity (PoA) is NOT Proof of Work! - -PoA rewards: -- Hardware age (older = better) -- Node uptime (longer = better) -- Hardware authenticity (verified via deep entropy) - -Formula: AS = (current_year - release_year) * log10(uptime_days + 1) -""" - -import hashlib -import math -import time -from dataclasses import dataclass, field -from typing import Dict, List, Optional, Tuple -from decimal import Decimal - -from .core_types import ( - HardwareInfo, - HardwareTier, - WalletAddress, - Block, - BlockMiner, - TokenAmount, - BLOCK_REWARD, - BLOCK_TIME_SECONDS, - CURRENT_YEAR, -) - - -# ============================================================================= -# Constants -# ============================================================================= - -AS_MAX: float = 100.0 # Maximum Antiquity Score for reward capping -AS_MIN: float = 1.0 # Minimum AS to participate in validation -MAX_MINERS_PER_BLOCK: int = 100 -BLOCK_REWARD_AMOUNT: TokenAmount = TokenAmount.from_rtc(float(BLOCK_REWARD)) - - -# ============================================================================= -# Antiquity Score Calculation -# ============================================================================= - -def calculate_antiquity_score(release_year: int, uptime_days: int) -> float: - """ - Calculate Antiquity Score per RIP-0001 spec. 
- - Formula: AS = (current_year - release_year) * log10(uptime_days + 1) - - Args: - release_year: Year the hardware was manufactured - uptime_days: Days since node started or last reboot - - Returns: - Antiquity Score (AS) - - Examples: - >>> calculate_antiquity_score(1992, 276) # 486 DX2 - 80.46 # (2025-1992) * log10(277) ≈ 33 * 2.44 - - >>> calculate_antiquity_score(2002, 276) # PowerPC G4 - 56.10 # (2025-2002) * log10(277) ≈ 23 * 2.44 - - >>> calculate_antiquity_score(2023, 30) # Modern CPU - 2.96 # (2025-2023) * log10(31) ≈ 2 * 1.49 - """ - age = max(0, CURRENT_YEAR - release_year) - # log10 gives diminishing returns on uptime: day 1→0, day 10→1, day 100→2, - # day 1000→3. This prevents a node that just rebooted from earning zero while - # also preventing infinite score growth for nodes with extreme uptime. - uptime_factor = math.log10(uptime_days + 1) - return age * uptime_factor - - -def calculate_reward(antiquity_score: float, total_reward: TokenAmount) -> TokenAmount: - """ - Calculate reward based on Antiquity Score per RIP-0001. - - Formula: Reward = R * min(1.0, AS / AS_max) - - Args: - antiquity_score: Node's AS value - total_reward: Total block reward pool - - Returns: - Calculated reward amount - """ - # Cap at AS_MAX so extremely old hardware (e.g., a 50-year-old mainframe) - # doesn't earn a disproportionate multiple of the block reward. 
- reward_factor = min(1.0, antiquity_score / AS_MAX) - reward_amount = int(total_reward.amount * reward_factor) - return TokenAmount(reward_amount) - - -# ============================================================================= -# Validated Proof -# ============================================================================= - -@dataclass -class ValidatedProof: - """A validated mining proof ready for block inclusion""" - wallet: WalletAddress - hardware: HardwareInfo - antiquity_score: float - anti_emulation_hash: str - validated_at: int - entropy_proof: Optional[bytes] = None - - def to_dict(self): - return { - "wallet": self.wallet.address, - "hardware": self.hardware.to_dict(), - "antiquity_score": self.antiquity_score, - "anti_emulation_hash": self.anti_emulation_hash, - "validated_at": self.validated_at, - } - - -# ============================================================================= -# Proof Errors -# ============================================================================= - -class ProofError(Exception): - """Base class for proof validation errors""" - pass - - -class BlockWindowClosedError(ProofError): - """Block window has closed""" - pass - - -class DuplicateSubmissionError(ProofError): - """Already submitted proof for this block""" - pass - - -class BlockFullError(ProofError): - """Block has reached maximum miners""" - pass - - -class InsufficientAntiquityError(ProofError): - """Antiquity Score below minimum threshold""" - pass - - -class HardwareAlreadyRegisteredError(ProofError): - """Hardware already registered to another wallet""" - pass - - -class EmulationDetectedError(ProofError): - """Emulation detected - hardware is not genuine""" - pass - - -class DriftLockViolationError(ProofError): - """Node behavior has drifted - quarantined per RIP-0003""" - pass - - -# ============================================================================= -# Proof of Antiquity Validator -# 
============================================================================= - -class ProofOfAntiquity: - """ - Proof of Antiquity consensus validator. - - This is NOT Proof of Work! We validate: - 1. Hardware authenticity via deep entropy checks - 2. Hardware age via device signature database - 3. Node uptime via continuous validation - 4. No computational puzzles - just verification - - Block selection uses weighted lottery based on Antiquity Score. - """ - - def __init__(self): - self.pending_proofs: List[ValidatedProof] = [] - self.block_start_time: int = int(time.time()) - self.known_hardware: Dict[str, WalletAddress] = {} # hash -> wallet - self.drifted_nodes: set = set() # Quarantined nodes (RIP-0003) - self.current_block_height: int = 0 - - def submit_proof( - self, - wallet: WalletAddress, - hardware: HardwareInfo, - anti_emulation_hash: str, - entropy_proof: Optional[bytes] = None, - ) -> Dict: - """ - Submit a mining proof for the current block. - - Args: - wallet: Miner's wallet address - hardware: Hardware information - anti_emulation_hash: Hash from entropy verification - entropy_proof: Optional detailed entropy proof - - Returns: - Result dict with acceptance status - - Raises: - Various ProofError subclasses on validation failure - """ - current_time = int(time.time()) - elapsed = current_time - self.block_start_time - - # Check if block window is still open - if elapsed >= BLOCK_TIME_SECONDS: - raise BlockWindowClosedError("Block window has closed") - - # Drift lock (RIP-0003): nodes that exhibit behavioral anomalies (e.g., - # inconsistent entropy proofs across epochs) are quarantined here rather - # than in the network layer to ensure the block itself stays clean. 
- if wallet.address in self.drifted_nodes: - raise DriftLockViolationError( - f"Node {wallet.address} is quarantined due to drift lock" - ) - - # Check for duplicate wallet submission - existing = [p for p in self.pending_proofs if p.wallet == wallet] - if existing: - raise DuplicateSubmissionError( - "Already submitted proof for this block" - ) - - # Check max miners - if len(self.pending_proofs) >= MAX_MINERS_PER_BLOCK: - raise BlockFullError("Block has reached maximum miners") - - # Calculate Antiquity Score - antiquity_score = calculate_antiquity_score( - hardware.release_year, - hardware.uptime_days - ) - - # Check minimum AS threshold (RIP-0003) - if antiquity_score < AS_MIN: - raise InsufficientAntiquityError( - f"Antiquity Score {antiquity_score:.2f} below minimum {AS_MIN}" - ) - - # Check for duplicate hardware - hw_hash = hardware.generate_hardware_hash() - if hw_hash in self.known_hardware: - existing_wallet = self.known_hardware[hw_hash] - if existing_wallet != wallet: - raise HardwareAlreadyRegisteredError( - f"Hardware already registered to {existing_wallet.address}" - ) - - # Create validated proof - validated = ValidatedProof( - wallet=wallet, - hardware=hardware, - antiquity_score=antiquity_score, - anti_emulation_hash=anti_emulation_hash, - validated_at=current_time, - entropy_proof=entropy_proof, - ) - - self.pending_proofs.append(validated) - self.known_hardware[hw_hash] = wallet - - return { - "success": True, - "message": "Proof accepted, waiting for block completion", - "pending_miners": len(self.pending_proofs), - "your_antiquity_score": antiquity_score, - "your_tier": hardware.tier.value, - "block_completes_in": BLOCK_TIME_SECONDS - elapsed, - } - - def process_block(self, previous_hash: str) -> Optional[Block]: - """ - Process all pending proofs and create a new block. - - Uses weighted lottery based on Antiquity Score for validator selection. 
- - Args: - previous_hash: Hash of previous block - - Returns: - New block if proofs exist, None otherwise - """ - if not self.pending_proofs: - self._reset_block() - return None - - # Calculate total AS for weighted distribution - total_as = sum(p.antiquity_score for p in self.pending_proofs) - - # Calculate rewards for each miner (proportional to AS) - miners = [] - total_distributed = 0 - - for proof in self.pending_proofs: - # Normalize each miner's score to its proportional share of total AS, - # then scale by miner count so a lone miner with score=AS_MAX earns - # the same as `calculate_reward(AS_MAX, ...)` would independently. - share = proof.antiquity_score / total_as - reward = calculate_reward( - proof.antiquity_score * share * len(self.pending_proofs), - BLOCK_REWARD_AMOUNT - ) - total_distributed += reward.amount - - miners.append(BlockMiner( - wallet=proof.wallet, - hardware=proof.hardware.cpu_model, - antiquity_score=proof.antiquity_score, - reward=reward, - )) - - # Create new block - self.current_block_height += 1 - block = Block( - height=self.current_block_height, - timestamp=int(time.time()), - previous_hash=previous_hash, - miners=miners, - total_reward=TokenAmount(total_distributed), - ) - - print(f"⛏️ Block #{block.height} created! 
" - f"Reward: {block.total_reward.to_rtc()} RTC " - f"split among {len(miners)} miners") - - # Reset for next block - self._reset_block() - - return block - - def _reset_block(self): - """Reset state for next block""" - self.pending_proofs.clear() - self.block_start_time = int(time.time()) - - def get_status(self) -> Dict: - """Get current block status""" - elapsed = int(time.time()) - self.block_start_time - total_as = sum(p.antiquity_score for p in self.pending_proofs) - - return { - "current_block_height": self.current_block_height, - "pending_proofs": len(self.pending_proofs), - "total_antiquity_score": total_as, - "block_age_seconds": elapsed, - "time_remaining_seconds": max(0, BLOCK_TIME_SECONDS - elapsed), - "accepting_proofs": elapsed < BLOCK_TIME_SECONDS, - } - - def quarantine_node(self, wallet: WalletAddress, reason: str): - """ - Quarantine a node due to drift lock violation (RIP-0003). - - Args: - wallet: Node wallet to quarantine - reason: Reason for quarantine - """ - self.drifted_nodes.add(wallet.address) - print(f"🚫 Node {wallet.address} quarantined: {reason}") - - def release_node(self, wallet: WalletAddress): - """ - Release a node from quarantine after challenge passage (RIP-0003). - - Args: - wallet: Node wallet to release - """ - self.drifted_nodes.discard(wallet.address) - print(f"✅ Node {wallet.address} released from quarantine") - - -# ============================================================================= -# Validator Selection -# ============================================================================= - -def select_block_validator(proofs: List[ValidatedProof]) -> Optional[ValidatedProof]: - """ - Select block validator using weighted lottery (RIP-0001). - - Higher Antiquity Score = higher probability of selection. 
- - Args: - proofs: List of validated proofs - - Returns: - Selected validator's proof, or None if no proofs - """ - if not proofs: - return None - - import random - - total_as = sum(p.antiquity_score for p in proofs) - if total_as == 0: - return random.choice(proofs) - - # Weighted random selection via cumulative distribution: pick a random point - # on [0, total_as] and return the proof whose range contains it. - # The last proof is returned as a fallback for floating-point rounding where - # cumulative may fall just short of total_as. - r = random.uniform(0, total_as) - cumulative = 0 - - for proof in proofs: - cumulative += proof.antiquity_score - if r <= cumulative: - return proof - - return proofs[-1] - - -# ============================================================================= -# Example Usage -# ============================================================================= - -if __name__ == "__main__": - # Demo: Calculate AS for different hardware - examples = [ - ("Intel 486 DX2-66", 1992, 276), - ("PowerPC G4", 2002, 276), - ("Core 2 Duo", 2006, 180), - ("Ryzen 9 7950X", 2022, 30), - ] - - print("=" * 60) - print("RUSTCHAIN PROOF OF ANTIQUITY - ANTIQUITY SCORE CALCULATOR") - print("=" * 60) - print(f"Formula: AS = (2025 - release_year) * log10(uptime_days + 1)") - print("=" * 60) - print() - - for model, year, uptime in examples: - hw = HardwareInfo(cpu_model=model, release_year=year, uptime_days=uptime) - as_score = calculate_antiquity_score(year, uptime) - tier = HardwareTier.from_release_year(year) - - print(f"📟 {model} ({year})") - print(f" Age: {CURRENT_YEAR - year} years") - print(f" Uptime: {uptime} days") - print(f" Tier: {tier.value.upper()} ({tier.multiplier}x)") - print(f" Antiquity Score: {as_score:.2f}") - print() - - print("💡 Remember: This is NOT Proof of Work!") - print(" Older hardware with longer uptime wins, not faster hardware.") +""" +RustChain Proof of Antiquity Consensus (RIP-0001) 
+================================================= + +Proof of Antiquity (PoA) is NOT Proof of Work! + +PoA rewards: +- Hardware age (older = better) +- Node uptime (longer = better) +- Hardware authenticity (verified via deep entropy) + +Formula: AS = (current_year - release_year) * log10(uptime_days + 1) +""" + +import hashlib +import math +import time +from dataclasses import dataclass, field +from typing import Dict, List, Optional, Tuple +from decimal import Decimal + +from .core_types import ( + HardwareInfo, + HardwareTier, + WalletAddress, + Block, + BlockMiner, + TokenAmount, + BLOCK_REWARD, + BLOCK_TIME_SECONDS, + CURRENT_YEAR, +) + + +# ============================================================================= +# Constants +# ============================================================================= + +AS_MAX: float = 100.0 # Maximum Antiquity Score for reward capping +AS_MIN: float = 1.0 # Minimum AS to participate in validation +MAX_MINERS_PER_BLOCK: int = 100 +BLOCK_REWARD_AMOUNT: TokenAmount = TokenAmount.from_rtc(float(BLOCK_REWARD)) + + +# ============================================================================= +# Antiquity Score Calculation +# ============================================================================= + +def calculate_antiquity_score(release_year: int, uptime_days: int) -> float: + """ + Calculate Antiquity Score per RIP-0001 spec. 
+ + Formula: AS = (current_year - release_year) * log10(uptime_days + 1) + + Args: + release_year: Year the hardware was manufactured + uptime_days: Days since node started or last reboot + + Returns: + Antiquity Score (AS) + + Examples: + >>> calculate_antiquity_score(1992, 276) # 486 DX2 + 80.46 # (2025-1992) * log10(277) ≈ 33 * 2.44 + + >>> calculate_antiquity_score(2002, 276) # PowerPC G4 + 56.10 # (2025-2002) * log10(277) ≈ 23 * 2.44 + + >>> calculate_antiquity_score(2023, 30) # Modern CPU + 2.96 # (2025-2023) * log10(31) ≈ 2 * 1.49 + """ + age = max(0, CURRENT_YEAR - release_year) + # log10 gives diminishing returns on uptime: day 1→0, day 10→1, day 100→2, + # day 1000→3. This prevents a node that just rebooted from earning zero while + # also preventing infinite score growth for nodes with extreme uptime. + uptime_factor = math.log10(uptime_days + 1) + return age * uptime_factor + + +def calculate_reward(antiquity_score: float, total_reward: TokenAmount) -> TokenAmount: + """ + Calculate reward based on Antiquity Score per RIP-0001. + + Formula: Reward = R * min(1.0, AS / AS_max) + + Args: + antiquity_score: Node's AS value + total_reward: Total block reward pool + + Returns: + Calculated reward amount + """ + # Cap at AS_MAX so extremely old hardware (e.g., a 50-year-old mainframe) + # doesn't earn a disproportionate multiple of the block reward. 
+ reward_factor = min(1.0, antiquity_score / AS_MAX) + reward_amount = int(total_reward.amount * reward_factor) + return TokenAmount(reward_amount) + + +# ============================================================================= +# Validated Proof +# ============================================================================= + +@dataclass +class ValidatedProof: + """A validated mining proof ready for block inclusion""" + wallet: WalletAddress + hardware: HardwareInfo + antiquity_score: float + anti_emulation_hash: str + validated_at: int + entropy_proof: Optional[bytes] = None + + def to_dict(self): + return { + "wallet": self.wallet.address, + "hardware": self.hardware.to_dict(), + "antiquity_score": self.antiquity_score, + "anti_emulation_hash": self.anti_emulation_hash, + "validated_at": self.validated_at, + } + + +# ============================================================================= +# Proof Errors +# ============================================================================= + +class ProofError(Exception): + """Base class for proof validation errors""" + pass + + +class BlockWindowClosedError(ProofError): + """Block window has closed""" + pass + + +class DuplicateSubmissionError(ProofError): + """Already submitted proof for this block""" + pass + + +class BlockFullError(ProofError): + """Block has reached maximum miners""" + pass + + +class InsufficientAntiquityError(ProofError): + """Antiquity Score below minimum threshold""" + pass + + +class HardwareAlreadyRegisteredError(ProofError): + """Hardware already registered to another wallet""" + pass + + +class EmulationDetectedError(ProofError): + """Emulation detected - hardware is not genuine""" + pass + + +class DriftLockViolationError(ProofError): + """Node behavior has drifted - quarantined per RIP-0003""" + pass + + +# ============================================================================= +# Proof of Antiquity Validator +# 
============================================================================= + +class ProofOfAntiquity: + """ + Proof of Antiquity consensus validator. + + This is NOT Proof of Work! We validate: + 1. Hardware authenticity via deep entropy checks + 2. Hardware age via device signature database + 3. Node uptime via continuous validation + 4. No computational puzzles - just verification + + Block selection uses weighted lottery based on Antiquity Score. + """ + + def __init__(self): + self.pending_proofs: List[ValidatedProof] = [] + self.block_start_time: int = int(time.time()) + self.known_hardware: Dict[str, WalletAddress] = {} # hash -> wallet + self.drifted_nodes: set = set() # Quarantined nodes (RIP-0003) + self.current_block_height: int = 0 + + def submit_proof( + self, + wallet: WalletAddress, + hardware: HardwareInfo, + anti_emulation_hash: str, + entropy_proof: Optional[bytes] = None, + ) -> Dict: + """ + Submit a mining proof for the current block. + + Args: + wallet: Miner's wallet address + hardware: Hardware information + anti_emulation_hash: Hash from entropy verification + entropy_proof: Optional detailed entropy proof + + Returns: + Result dict with acceptance status + + Raises: + Various ProofError subclasses on validation failure + """ + current_time = int(time.time()) + elapsed = current_time - self.block_start_time + + # Check if block window is still open + if elapsed >= BLOCK_TIME_SECONDS: + raise BlockWindowClosedError("Block window has closed") + + # Drift lock (RIP-0003): nodes that exhibit behavioral anomalies (e.g., + # inconsistent entropy proofs across epochs) are quarantined here rather + # than in the network layer to ensure the block itself stays clean. 
+ if wallet.address in self.drifted_nodes: + raise DriftLockViolationError( + f"Node {wallet.address} is quarantined due to drift lock" + ) + + # Check for duplicate wallet submission + existing = [p for p in self.pending_proofs if p.wallet == wallet] + if existing: + raise DuplicateSubmissionError( + "Already submitted proof for this block" + ) + + # Check max miners + if len(self.pending_proofs) >= MAX_MINERS_PER_BLOCK: + raise BlockFullError("Block has reached maximum miners") + + # Calculate Antiquity Score + antiquity_score = calculate_antiquity_score( + hardware.release_year, + hardware.uptime_days + ) + + # Check minimum AS threshold (RIP-0003) + if antiquity_score < AS_MIN: + raise InsufficientAntiquityError( + f"Antiquity Score {antiquity_score:.2f} below minimum {AS_MIN}" + ) + + # Check for duplicate hardware + hw_hash = hardware.generate_hardware_hash() + if hw_hash in self.known_hardware: + existing_wallet = self.known_hardware[hw_hash] + if existing_wallet != wallet: + raise HardwareAlreadyRegisteredError( + f"Hardware already registered to {existing_wallet.address}" + ) + + # Create validated proof + validated = ValidatedProof( + wallet=wallet, + hardware=hardware, + antiquity_score=antiquity_score, + anti_emulation_hash=anti_emulation_hash, + validated_at=current_time, + entropy_proof=entropy_proof, + ) + + self.pending_proofs.append(validated) + self.known_hardware[hw_hash] = wallet + + return { + "success": True, + "message": "Proof accepted, waiting for block completion", + "pending_miners": len(self.pending_proofs), + "your_antiquity_score": antiquity_score, + "your_tier": hardware.tier.value, + "block_completes_in": BLOCK_TIME_SECONDS - elapsed, + } + + def process_block(self, previous_hash: str) -> Optional[Block]: + """ + Process all pending proofs and create a new block. + + Uses weighted lottery based on Antiquity Score for validator selection. 
+ + Args: + previous_hash: Hash of previous block + + Returns: + New block if proofs exist, None otherwise + """ + if not self.pending_proofs: + self._reset_block() + return None + + # Calculate total AS for weighted distribution + total_as = sum(p.antiquity_score for p in self.pending_proofs) + + # Calculate rewards for each miner (proportional to AS) + miners = [] + total_distributed = 0 + + for proof in self.pending_proofs: + # Normalize each miner's score to its proportional share of total AS, + # then scale by miner count so a lone miner with score=AS_MAX earns + # the same as `calculate_reward(AS_MAX, ...)` would independently. + share = proof.antiquity_score / total_as + reward = calculate_reward( + proof.antiquity_score * share * len(self.pending_proofs), + BLOCK_REWARD_AMOUNT + ) + total_distributed += reward.amount + + miners.append(BlockMiner( + wallet=proof.wallet, + hardware=proof.hardware.cpu_model, + antiquity_score=proof.antiquity_score, + reward=reward, + )) + + # Create new block + self.current_block_height += 1 + block = Block( + height=self.current_block_height, + timestamp=int(time.time()), + previous_hash=previous_hash, + miners=miners, + total_reward=TokenAmount(total_distributed), + ) + + print(f"⛏️ Block #{block.height} created! 
" + f"Reward: {block.total_reward.to_rtc()} RTC " + f"split among {len(miners)} miners") + + # Reset for next block + self._reset_block() + + return block + + def _reset_block(self): + """Reset state for next block""" + self.pending_proofs.clear() + self.block_start_time = int(time.time()) + + def get_status(self) -> Dict: + """Get current block status""" + elapsed = int(time.time()) - self.block_start_time + total_as = sum(p.antiquity_score for p in self.pending_proofs) + + return { + "current_block_height": self.current_block_height, + "pending_proofs": len(self.pending_proofs), + "total_antiquity_score": total_as, + "block_age_seconds": elapsed, + "time_remaining_seconds": max(0, BLOCK_TIME_SECONDS - elapsed), + "accepting_proofs": elapsed < BLOCK_TIME_SECONDS, + } + + def quarantine_node(self, wallet: WalletAddress, reason: str): + """ + Quarantine a node due to drift lock violation (RIP-0003). + + Args: + wallet: Node wallet to quarantine + reason: Reason for quarantine + """ + self.drifted_nodes.add(wallet.address) + print(f"🚫 Node {wallet.address} quarantined: {reason}") + + def release_node(self, wallet: WalletAddress): + """ + Release a node from quarantine after challenge passage (RIP-0003). + + Args: + wallet: Node wallet to release + """ + self.drifted_nodes.discard(wallet.address) + print(f"✅ Node {wallet.address} released from quarantine") + + +# ============================================================================= +# Validator Selection +# ============================================================================= + +def select_block_validator(proofs: List[ValidatedProof]) -> Optional[ValidatedProof]: + """ + Select block validator using weighted lottery (RIP-0001). + + Higher Antiquity Score = higher probability of selection. 
+ + Args: + proofs: List of validated proofs + + Returns: + Selected validator's proof, or None if no proofs + """ + if not proofs: + return None + + import random + + total_as = sum(p.antiquity_score for p in proofs) + if total_as == 0: + return random.choice(proofs) + + # Weighted random selection via cumulative distribution: pick a random point + # on [0, total_as] and return the proof whose range contains it. + # The last proof is returned as a fallback for floating-point rounding where + # cumulative may fall just short of total_as. + r = random.uniform(0, total_as) + cumulative = 0 + + for proof in proofs: + cumulative += proof.antiquity_score + if r <= cumulative: + return proof + + return proofs[-1] + + +# ============================================================================= +# Example Usage +# ============================================================================= + +if __name__ == "__main__": + # Demo: Calculate AS for different hardware + examples = [ + ("Intel 486 DX2-66", 1992, 276), + ("PowerPC G4", 2002, 276), + ("Core 2 Duo", 2006, 180), + ("Ryzen 9 7950X", 2022, 30), + ] + + print("=" * 60) + print("RUSTCHAIN PROOF OF ANTIQUITY - ANTIQUITY SCORE CALCULATOR") + print("=" * 60) + print(f"Formula: AS = (2025 - release_year) * log10(uptime_days + 1)") + print("=" * 60) + print() + + for model, year, uptime in examples: + hw = HardwareInfo(cpu_model=model, release_year=year, uptime_days=uptime) + as_score = calculate_antiquity_score(year, uptime) + tier = HardwareTier.from_release_year(year) + + print(f"📟 {model} ({year})") + print(f" Age: {CURRENT_YEAR - year} years") + print(f" Uptime: {uptime} days") + print(f" Tier: {tier.value.upper()} ({tier.multiplier}x)") + print(f" Antiquity Score: {as_score:.2f}") + print() + + print("💡 Remember: This is NOT Proof of Work!") + print(" Older hardware with longer uptime wins, not faster hardware.")