diff --git a/Cargo.lock b/Cargo.lock index 377f893c..709e5e8b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4481,6 +4481,7 @@ dependencies = [ "serde_json", "serial_test", "sha2", + "tempfile", "tokio", "tracing", "tracing-subscriber", diff --git a/crates/enclave-contract/contracts/UpgradeOperator.sol b/crates/enclave-contract/contracts/UpgradeOperator.sol index 85593dd2..6dadd9bf 100644 --- a/crates/enclave-contract/contracts/UpgradeOperator.sol +++ b/crates/enclave-contract/contracts/UpgradeOperator.sol @@ -16,6 +16,7 @@ contract UpgradeOperator { // Keep track of all tags for enumeration if needed bytes32[] public acceptedTags; bytes32[] public deprecatedTags; + bool public initialIsDeprecated = false; // Track if a tag exists to prevent duplicates mapping(bytes32 => bool) public tagExists; @@ -128,7 +129,14 @@ contract UpgradeOperator { * @param measurementHash Hash of the measurements to check */ function isAccepted(bytes32 measurementHash) external view returns (bool) { - return bytes(acceptedMeasurements[measurementHash].tag).length > 0; + if ( + !initialIsDeprecated && + measurementHash == _getMeasurementHash(getInitialMeasurements()) + ) { + return true; + } else { + return bytes(acceptedMeasurements[measurementHash].tag).length > 0; + } } /** @@ -147,6 +155,17 @@ contract UpgradeOperator { function getAcceptedMeasurement( bytes32 measurementHash ) external view returns (Measurements memory) { + if ( + !initialIsDeprecated && + measurementHash == _getMeasurementHash(getInitialMeasurements()) + ) { + Measurements memory initialMeasurements = getInitialMeasurements(); + + if (_getMeasurementHash(initialMeasurements) == measurementHash) { + return initialMeasurements; + } + } + require( bytes(acceptedMeasurements[measurementHash].tag).length > 0, "Measurement not found" @@ -196,4 +215,34 @@ contract UpgradeOperator { ) ); } + + function getInitialMeasurements() + public + pure + returns (Measurements memory) + { + Measurements memory m; + + m.tag = "Initial"; 
+ + m + .mrtd = hex"f858414aef26d52a3b21614bab4bafab13b3ed62ebdd9d46a6be799228c2e27bc0d025cc6e4e90daff827cbe0316bbd9"; + + m + .mrseam = hex"49b66faa451d19ebbdbe89371b8daf2b65aa3984ec90110343e9e2eec116af08850fa20e3b1aa9a874d77a65380ee7e6"; + + m.registrar_slots = new uint8[](4); + m.registrar_slots[0] = 0; + m.registrar_slots[1] = 1; + m.registrar_slots[2] = 2; + m.registrar_slots[3] = 3; + + m.registrar_values = new bytes[](4); + m.registrar_values[0] = new bytes(48); // All zeros by default + m.registrar_values[1] = new bytes(48); + m.registrar_values[2] = new bytes(48); + m.registrar_values[3] = new bytes(48); + + return m; + } } diff --git a/crates/enclave-contract/tests/MultisigUpgradeOperator.t.sol b/crates/enclave-contract/tests/MultisigUpgradeOperator.t.sol index d46ab672..844314d8 100644 --- a/crates/enclave-contract/tests/MultisigUpgradeOperator.t.sol +++ b/crates/enclave-contract/tests/MultisigUpgradeOperator.t.sol @@ -236,6 +236,37 @@ contract MultisigUpgradeOperatorTest is Test { assertFalse(upgradeOperator.isDeprecated(measurement1Hash)); } + function testInitialMeasurments() public view { + // Setup test measurements + UpgradeOperator.Measurements memory m; + + m.tag = "Initial"; + + m + .mrtd = hex"f858414aef26d52a3b21614bab4bafab13b3ed62ebdd9d46a6be799228c2e27bc0d025cc6e4e90daff827cbe0316bbd9"; + + m + .mrseam = hex"49b66faa451d19ebbdbe89371b8daf2b65aa3984ec90110343e9e2eec116af08850fa20e3b1aa9a874d77a65380ee7e6"; + + m.registrar_slots = new uint8[](4); + m.registrar_slots[0] = 0; + m.registrar_slots[1] = 1; + m.registrar_slots[2] = 2; + m.registrar_slots[3] = 3; + + m.registrar_values = new bytes[](4); + m.registrar_values[0] = new bytes(48); // All zeros by default + m.registrar_values[1] = new bytes(48); + m.registrar_values[2] = new bytes(48); + m.registrar_values[3] = new bytes(48); + + console.logBytes(m.mrtd); + console.logBytes(m.mrseam); + bytes32 measurementHash = upgradeOperator.getMeasurementHash(m); + + 
assertTrue(upgradeOperator.isAccepted(measurementHash)); + } + // Test get vote status function testGetVoteStatus() public { vm.prank(signer1); diff --git a/crates/enclave-contract/tests/multisig_test.rs b/crates/enclave-contract/tests/multisig_test.rs index 910be75d..75e60cb4 100644 --- a/crates/enclave-contract/tests/multisig_test.rs +++ b/crates/enclave-contract/tests/multisig_test.rs @@ -38,7 +38,6 @@ pub async fn test_multisig_upgrade_operator_workflow() -> Result<(), anyhow::Err // Wait a bit for the transaction to be processed sleep(Duration::from_secs(2)); - // Test data for proposal let params = Measurements { tag: "AzureV1".to_string(), mrtd: [ diff --git a/crates/enclave-server/Cargo.toml b/crates/enclave-server/Cargo.toml index f15cf44d..c40ea8a3 100644 --- a/crates/enclave-server/Cargo.toml +++ b/crates/enclave-server/Cargo.toml @@ -52,4 +52,9 @@ sha2.workspace = true [dev-dependencies] serial_test = "3.2.0" +tempfile = "3.17.1" + +[features] +default = [] +systemctl = [] diff --git a/crates/enclave-server/src/key_manager/mod.rs b/crates/enclave-server/src/key_manager/mod.rs index ecde7660..4403bfd6 100644 --- a/crates/enclave-server/src/key_manager/mod.rs +++ b/crates/enclave-server/src/key_manager/mod.rs @@ -9,7 +9,7 @@ const PURPOSE_DERIVE_SALT: &[u8] = b"seismic-purpose-derive-salt"; /// Prefix used in domain separation when deriving purpose-specific keys. 
const PREFIX: &str = "seismic-purpose"; -#[derive(Zeroize, ZeroizeOnDrop)] +#[derive(Zeroize, ZeroizeOnDrop, Clone)] pub struct Key([u8; 32]); impl AsRef<[u8]> for Key { @@ -18,6 +18,7 @@ impl AsRef<[u8]> for Key { } } +#[derive(Clone)] pub struct KeyManager { root_key: Key, } diff --git a/crates/enclave-server/src/lib.rs b/crates/enclave-server/src/lib.rs index 07e25218..f6a5a6c1 100644 --- a/crates/enclave-server/src/lib.rs +++ b/crates/enclave-server/src/lib.rs @@ -1,11 +1,14 @@ mod attestation; mod key_manager; mod server; +mod snapshot; +mod summit; pub mod utils; const ENCLAVE_DEFAULT_ENDPOINT_IP: &str = "127.0.0.1"; const DEFAULT_RETH_RPC: &str = "127.0.0.1:8545"; pub const ENCLAVE_DEFAULT_ENDPOINT_PORT: u16 = 7878; +const DEFAULT_ENCLAVE_SUMMIT_SOCKET: &str = "/tmp/reth_enclave_socket.ipc"; use anyhow::Result; use clap::Parser; @@ -32,6 +35,10 @@ pub struct Args { #[arg(long)] pub peers: Vec, + /// path to unix socket used to communicate with Summit + #[arg(long, default_value_t = DEFAULT_ENCLAVE_SUMMIT_SOCKET.to_string())] + pub summit_socket: String, + #[arg(long, default_value_t =DEFAULT_RETH_RPC.to_string())] pub reth_rpc_url: String, @@ -48,7 +55,7 @@ impl Args { if self.mock { start_mock_server(addr).await } else { - server::start_server(addr, self.genesis_node, self.peers).await + server::start_server(addr, self).await } } } diff --git a/crates/enclave-server/src/server.rs b/crates/enclave-server/src/server.rs index 17c731f7..68596443 100644 --- a/crates/enclave-server/src/server.rs +++ b/crates/enclave-server/src/server.rs @@ -1,4 +1,11 @@ -use crate::{attestation::AttestationAgent, key_manager::KeyManager, utils::anyhow_to_rpc_error}; +use crate::snapshot::{DATA_DISK_DIR, SNAPSHOT_FILE_PREFIX, restore_from_encrypted_snapshot}; +use crate::{ + Args, + attestation::AttestationAgent, + key_manager::KeyManager, + summit::run_summit_socket, + utils::{anyhow_to_rpc_error, string_to_rpc_error}, +}; use dcap_rs::types::quotes::version_4::QuoteV4; use 
jsonrpsee::{ core::{RpcResult, async_trait}, @@ -9,7 +16,10 @@ use seismic_enclave::{ AttestationGetEvidenceResponse, GetPurposeKeysResponse, ShareRootKeyResponse, TdxQuoteRpcClient as _, api::TdxQuoteRpcServer, }; +use std::fs; +use std::path::Path; use std::{net::SocketAddr, time::Duration}; +use tokio::io::AsyncWriteExt as _; use tracing::{info, warn}; pub struct TdxQuoteServer { @@ -79,29 +89,132 @@ impl TdxQuoteRpcServer for TdxQuoteServer { } /// Prepares an encrypted snapshot - async fn prepare_encrypted_snapshot(&self) -> RpcResult<()> { - todo!() + async fn download_encrypted_snapshot(&self, epoch: u64, url: String) -> RpcResult<()> { + // Download the file + let response = reqwest::get(&url) + .await + .map_err(|e| string_to_rpc_error(format!("Failed to download snapshot: {}", e)))?; + + if !response.status().is_success() { + return Err(string_to_rpc_error(format!( + "HTTP error: {}", + response.status() + ))); + } + + let bytes = response + .bytes() + .await + .map_err(|e| string_to_rpc_error(format!("Failed to read response body: {}", e)))?; + + // Create the filename + let filename = format!("{SNAPSHOT_FILE_PREFIX}-{epoch}.tar.lz4.enc"); + + // Write to file + let mut file = tokio::fs::File::create(format!("{DATA_DISK_DIR}/{filename}")) + .await + .map_err(|e| { + string_to_rpc_error(format!("Failed to create file {}: {}", filename, e)) + })?; + + file.write_all(&bytes).await.map_err(|e| { + string_to_rpc_error(format!("Failed to write to file {}: {}", filename, e)) + })?; + + Ok(()) } /// Restores from an encrypted snapshot - async fn restore_from_encrypted_snapshot(&self) -> RpcResult<()> { - todo!() + async fn restore_from_encrypted_snapshot(&self, epoch: u64) -> RpcResult<()> { + restore_from_encrypted_snapshot( + &self.key_manager, + epoch, + format!("{DATA_DISK_DIR}/{epoch}-snapshot.tar.lz4.enc"), + ) + .await + .map_err(|e| string_to_rpc_error(format!("Failed to restore from checkpoint: {e}"))) + } + + /// Get an encrypted snapshot from this 
servers database + async fn get_encrypted_snapshot(&self, epoch: u64) -> RpcResult> { + let snapshot_path = format!( + "{}/{}-{}.tar.lz4.enc", + DATA_DISK_DIR, SNAPSHOT_FILE_PREFIX, epoch + ); + + if !fs::exists(&snapshot_path).unwrap_or_default() { + return Err(string_to_rpc_error(format!( + "No snapshot for epoch {epoch} stored" + ))); + } + + fs::read(snapshot_path).map_err(|e| { + string_to_rpc_error(format!( + "Failed to read snapshot for epoch {}: {}", + epoch, e + )) + }) + } + + /// List all encrypted snapshots stored in this enclave + async fn list_all_encrypted_snapshots(&self) -> RpcResult> { + let dir_path = Path::new(DATA_DISK_DIR); + + let entries = fs::read_dir(dir_path).map_err(|e| { + string_to_rpc_error(format!("Failed to read snapshots directory: {}", e)) + })?; + + let mut epochs = Vec::new(); + let prefix = format!("{}-", SNAPSHOT_FILE_PREFIX); + let suffix = ".tar.lz4.enc"; + + for entry in entries { + let entry = entry.map_err(|e| { + string_to_rpc_error(format!("Failed to read directory entry: {}", e)) + })?; + + if let Some(filename) = entry.file_name().to_str() { + if filename.starts_with(&prefix) && filename.ends_with(suffix) { + // Extract epoch from filename + let epoch_str = filename + .strip_prefix(&prefix) + .and_then(|s| s.strip_suffix(suffix)); + + if let Some(epoch_str) = epoch_str { + if let Ok(epoch) = epoch_str.parse::() { + epochs.push(epoch); + } + } + } + } + } + + epochs.sort_unstable(); + Ok(epochs) + } + + /// List all encrypted snapshots stored in this enclave + async fn list_latest_encrypted_snapshots(&self) -> RpcResult { + let all_snapshots = self.list_all_encrypted_snapshots().await?; + + all_snapshots + .into_iter() + .max() + .ok_or_else(|| string_to_rpc_error("No snapshots found".to_string())) } } -pub async fn start_server( - addr: SocketAddr, - genesis_node: bool, - peers: Vec, -) -> anyhow::Result<()> { +pub async fn start_server(addr: SocketAddr, args: Args) -> anyhow::Result<()> { let attestation_agent = 
AttestationAgent::new().unwrap(); - let key_manager = if genesis_node { + let key_manager = if args.genesis_node { KeyManager::new_as_genesis()? } else { - fetch_root_key_from_peers(peers, &attestation_agent).await + fetch_root_key_from_peers(args.peers, &attestation_agent).await }; + let summit_handle = tokio::spawn(run_summit_socket(args.summit_socket, key_manager.clone())); + let server = ServerBuilder::default().build(addr).await?; let handle = server.start(TdxQuoteServer::new(attestation_agent, key_manager).into_rpc()); @@ -109,6 +222,10 @@ pub async fn start_server( println!("TDX Quote JSON-RPC Server started at {}", addr); handle.stopped().await; + + // server stopped abort the summit socket + summit_handle.abort(); + Ok(()) } diff --git a/crates/enclave-server/src/snapshot/compress.rs b/crates/enclave-server/src/snapshot/compress.rs index 8b890b01..8f3ef709 100644 --- a/crates/enclave-server/src/snapshot/compress.rs +++ b/crates/enclave-server/src/snapshot/compress.rs @@ -1,4 +1,4 @@ -use libc; +use anyhow::Result; use std::path::Path; use std::process::Command; @@ -63,7 +63,7 @@ pub fn compress_datadir( Ok(()) } -/// Decompresses a `.tar.lz4` snapshot archive into a specified data directory (`data_dir`). +/// Decompresses a `.tar.lz4` snapshot archive into a specified data directory (`output`). /// /// This function restores the contents of a previously created snapshot archive by extracting /// its contents using the `tar` command with LZ4 decompression. It is commonly used for @@ -71,9 +71,8 @@ pub fn compress_datadir( /// /// # Arguments /// -/// * `data_dir` - Path to the directory where the archive should be extracted. -/// * `snapshot_dir` - Path to the directory where the snapshot archive is stored. -/// * `snapshot_file` - Filename of the `.tar.lz4` snapshot archive to restore (e.g., `snapshot.tar.lz4`). +/// * `output` - Path to the directory where the archive should be extracted. 
+/// * `snapshot_path` - Path to where the compressed snapshot file is stored. /// /// # Returns /// @@ -85,33 +84,32 @@ pub fn compress_datadir( /// - The snapshot file does not exist at the specified path. /// - The `tar` command fails to execute or returns a non-zero exit status. pub fn decompress_datadir( - data_dir: &str, - snapshot_dir: &str, - snapshot_file: &str, + output: impl AsRef, + snapshot_path: impl AsRef, ) -> Result<(), anyhow::Error> { - let snapshot_path = format!("{}/{}", snapshot_dir, snapshot_file); - + let snapshot_path = snapshot_path.as_ref(); // Confirm that the snapshot file exists - if !Path::new(&snapshot_path).exists() { + + if !snapshot_path.exists() { anyhow::bail!( - "Snapshot file not found at expected path: {}", + "Snapshot file not found at expected path: {:?}", snapshot_path ); } // change the umask so that files can be written to by the user's group // so that reth can write to the files - let old_umask = unsafe { libc::umask(0o002) }; + // let old_umask = unsafe { libc::umask(0o002) }; // Run the tar command to decompress the snapshot let output = Command::new("tar") - .current_dir(data_dir) + .current_dir(output) .args([ "--use-compress-program=lz4", "--no-same-permissions", "--no-same-owner", "-xvPf", - &snapshot_path, + &snapshot_path.to_string_lossy(), ]) .output() .map_err(|e| anyhow::anyhow!("Failed to spwan tar process: {:?}", e))?; @@ -129,9 +127,9 @@ pub fn decompress_datadir( } // change the umask back - unsafe { - libc::umask(old_umask); - } + // unsafe { + // libc::umask(old_umask); + // } Ok(()) } @@ -139,8 +137,8 @@ pub fn decompress_datadir( #[cfg(test)] mod tests { use super::*; - use crate::snapshot::SNAPSHOT_FILE; - use crate::utils::test_utils::{generate_dummy_file, read_first_n_bytes}; + use crate::snapshot::SNAPSHOT_FILE_PREFIX; + use crate::utils::{generate_dummy_file, read_first_n_bytes}; use std::fs; use std::path::Path; @@ -148,15 +146,16 @@ mod tests { #[test] fn test_compress_datadir() -> Result<(), 
anyhow::Error> { + let snapshot_file = format!("{SNAPSHOT_FILE_PREFIX}-0.tar.lz4"); // Set up a temp dir let temp_data_dir = tempdir().unwrap(); let temp_data_dir_path = temp_data_dir.path(); - let temp_snapshot_dir = tempdir().unwrap(); + fs::create_dir(temp_data_dir_path.join("db"))?; let snapshot_path = &format!( "{}/{}", - temp_snapshot_dir.path().to_str().unwrap(), - SNAPSHOT_FILE + temp_data_dir_path.to_str().unwrap(), + &snapshot_file ); let mdbx_path = temp_data_dir_path.join("db").join("mdbx.dat"); @@ -167,20 +166,20 @@ mod tests { // Create the snapshot compress_datadir( - temp_data_dir.path().to_str().unwrap(), - temp_snapshot_dir.path().to_str().unwrap(), - SNAPSHOT_FILE, + temp_data_dir_path.join("db").to_str().unwrap(), + temp_data_dir_path.to_str().unwrap(), + &snapshot_file, ) .unwrap(); + assert!(Path::new(&snapshot_path).exists()); // Confirm that we recover the original file fs::remove_file(&mdbx_path)?; assert!(!Path::new(&mdbx_path).exists()); decompress_datadir( - temp_data_dir.path().to_str().unwrap(), - temp_snapshot_dir.path().to_str().unwrap(), - SNAPSHOT_FILE, + temp_data_dir_path.join("db").to_str().unwrap(), + snapshot_path, ) .unwrap(); assert!(Path::new(&mdbx_path).exists()); diff --git a/crates/enclave-server/src/snapshot/file_encrypt.rs b/crates/enclave-server/src/snapshot/file_encrypt.rs index 91380629..9ced2df1 100644 --- a/crates/enclave-server/src/snapshot/file_encrypt.rs +++ b/crates/enclave-server/src/snapshot/file_encrypt.rs @@ -1,5 +1,5 @@ -use crate::key_manager::NetworkKeyProvider; -use seismic_enclave::crypto::{decrypt_file, encrypt_file}; +use crate::key_manager::KeyManager; +use seismic_enclave::{decrypt_file, encrypt_file}; use std::path::Path; @@ -12,6 +12,7 @@ use std::path::Path; /// /// # Arguments /// +/// * `kp` - KeyManager to derive the encryption key /// * `input_dir` - Directory containing the plaintext snapshot file. /// * `output_dir` - Directory where the encrypted file should be written. 
/// * `snapshot_file` - Filename of the snapshot file to encrypt (e.g., `snapshot.tar.lz4`). @@ -27,8 +28,7 @@ use std::path::Path; /// - The input snapshot file does not exist. /// - Encryption fails due to an internal error in the encryption process. pub fn encrypt_snapshot( - kp: &impl NetworkKeyProvider, - epoch: u64, + kp: &KeyManager, input_dir: &str, output_dir: &str, snapshot_file: &str, @@ -44,8 +44,8 @@ pub fn encrypt_snapshot( ); } - let snapshot_key = kp.get_snapshot_key(epoch); - encrypt_file(&input_path, &output_path, &snapshot_key) + let snapshot_key = kp.get_snapshot_key(0); + encrypt_file(input_path, output_path, &snapshot_key) .map_err(|e| anyhow::anyhow!("Failed to encrypt snapshot file: {:?}", e))?; Ok(()) @@ -59,9 +59,10 @@ pub fn encrypt_snapshot( /// /// # Arguments /// -/// * `input_dir` - Directory containing the encrypted snapshot file (e.g., `snapshot.tar.lz4.enc`). +/// * `kp` - KeyManager to derive the encryption key +/// * `epoch` - The epoch this snapshot is from +/// * `encrypted_snapshot` - path to the encrypted snapshot file (e.g., `snapshot.tar.lz4.enc`). /// * `output_dir` - Directory where the decrypted snapshot file should be saved. -/// * `snapshot_file` - Base filename of the snapshot archive (without `.enc` suffix). /// /// # Returns /// @@ -74,25 +75,24 @@ pub fn encrypt_snapshot( /// - The encrypted snapshot file does not exist. /// - Decryption fails due to an incorrect or unavailable key, or an internal decryption error. 
pub fn decrypt_snapshot( - kp: &impl NetworkKeyProvider, + kp: &KeyManager, epoch: u64, - input_dir: &str, - output_dir: &str, - snapshot_file: &str, + encrypted_snapshot: impl AsRef, + output_dir: impl AsRef, ) -> Result<(), anyhow::Error> { - let input_path = &format!("{}/{}.enc", input_dir, snapshot_file); - let output_path = &format!("{}/{}", output_dir, snapshot_file); + let src = encrypted_snapshot.as_ref(); + let dest = output_dir.as_ref(); // confirm that the snapshot file exists - if !Path::new(&input_path).exists() { + if !src.exists() { anyhow::bail!( "Encrypted Snapshot file not found at expected path: {:?}", - &input_path + &src ); } let snapshot_key = kp.get_snapshot_key(epoch); - decrypt_file(&input_path, &output_path, &snapshot_key) + decrypt_file(src, dest, &snapshot_key) .map_err(|e| anyhow::anyhow!("Failed to decrypt snapshot file: {:?}", e))?; Ok(()) @@ -101,25 +101,25 @@ pub fn decrypt_snapshot( #[cfg(test)] mod tests { use super::*; - use crate::key_manager::KeyManagerBuilder; - use crate::snapshot::SNAPSHOT_FILE; - use crate::utils::test_utils::{generate_dummy_file, read_first_n_bytes}; + use crate::key_manager::KeyManager; + use crate::snapshot::SNAPSHOT_FILE_PREFIX; + use crate::utils::{generate_dummy_file, read_first_n_bytes}; use anyhow::Error; - use std::fs; use std::path::Path; use tempfile::tempdir; #[test] fn test_encrypt_snapshot() -> Result<(), Error> { - let kp = KeyManagerBuilder::build_mock().unwrap(); + let kp = KeyManager::new_as_genesis().unwrap(); let epoch = 0; + let snapshot_file = format!("{SNAPSHOT_FILE_PREFIX}-{epoch}.tar.lz4"); // Set up a temp dir let temp_dir = tempdir().unwrap(); let temp_path = temp_dir.path(); - let snapshot_path = temp_path.join(SNAPSHOT_FILE); - let ciphertext_path = temp_path.join(format!("{}.enc", SNAPSHOT_FILE)); + let snapshot_path = temp_path.join(&snapshot_file); + let ciphertext_path = temp_path.join(format!("{}.enc", &snapshot_file)); // Generate a dummy database file (e.g., 10MB) 
generate_dummy_file(&snapshot_path, 10 * 1024 * 1024)?; @@ -131,25 +131,17 @@ mod tests { // Create the encrypted snapshot encrypt_snapshot( &kp, - epoch, temp_path.to_str().unwrap(), temp_path.to_str().unwrap(), - SNAPSHOT_FILE, + &snapshot_file, ) .unwrap(); assert!(Path::new(&ciphertext_path).exists()); // Confirm that we recover the original file - fs::remove_file(&snapshot_path)?; + std::fs::remove_file(&snapshot_path)?; assert!(!Path::new(&snapshot_path).exists()); - decrypt_snapshot( - &kp, - epoch, - temp_path.to_str().unwrap(), - temp_path.to_str().unwrap(), - SNAPSHOT_FILE, - ) - .unwrap(); + decrypt_snapshot(&kp, epoch, ciphertext_path, &snapshot_path).unwrap(); assert!(Path::new(&snapshot_path).exists()); // Check metadata of restored file matches the original diff --git a/crates/enclave-server/src/snapshot/mod.rs b/crates/enclave-server/src/snapshot/mod.rs index bc974236..03f5f848 100644 --- a/crates/enclave-server/src/snapshot/mod.rs +++ b/crates/enclave-server/src/snapshot/mod.rs @@ -1,100 +1,141 @@ mod compress; mod file_encrypt; -use crate::key_manager::NetworkKeyProvider; -use compress::{compress_datadir, decompress_datadir}; -use file_encrypt::{decrypt_snapshot, encrypt_snapshot}; -use std::fs; - -#[cfg(not(feature = "supervisorctl"))] -use crate::utils::service::{start_reth, stop_reth}; -#[cfg(feature = "supervisorctl")] -use crate::utils::supervisorctl::{start_reth, stop_reth}; - -#[cfg(not(feature = "supervisorctl"))] -pub const RETH_DATA_DIR: &str = "/persistent/reth"; // correct when running with yocto builds -#[cfg(feature = "supervisorctl")] +use crate::{ + key_manager::KeyManager, + utils::{copy_dir_all, rename_or_copy, start_reth, start_summit, stop_reth, stop_summit}, +}; + +use anyhow::Result; +use compress::compress_datadir; +pub use compress::decompress_datadir; +pub use file_encrypt::{decrypt_snapshot, encrypt_snapshot}; +use std::{ + fs, + path::{Path, PathBuf}, +}; + +#[cfg(feature = "systemctl")] +pub const RETH_DATA_DIR: &str = 
"/persistent/reth"; // correct when running with Mkosi builds + +#[cfg(not(feature = "systemctl"))] pub const RETH_DATA_DIR: &str = "/home/azureuser/.reth"; // correct when running reth with `cargo run` on devbox -pub const DATA_DISK_DIR: &str = "/mnt/datadisk"; +#[cfg(feature = "systemctl")] +pub const SUMMIT_DATA_DIR: &str = "/persistent/summit/db"; // correct when running with Mkosi builds + +#[cfg(not(feature = "systemctl"))] +pub const SUMMIT_DATA_DIR: &str = "/home/azureuser/.summit/db"; // correct when running reth with `cargo run` on devbox + +pub const DATA_DISK_DIR: &str = "/persistent/snapshots"; pub const SNAPSHOT_DIR: &str = "/tmp/snapshot"; -pub const SNAPSHOT_FILE: &str = "seismic_reth_snapshot.tar.lz4"; +pub const SNAPSHOT_FILE_PREFIX: &str = "seismic_reth_snapshot"; /// Prepares an encrypted snapshot of the Reth database and stores it on a mounted data disk. /// /// This function performs the following steps: /// 1. Stops the Reth process to ensure the database is in a consistent state. -/// 2. Compresses the database directory into a snapshot archive. -/// 3. Encrypts the compressed snapshot using the snapshot key. -/// 4. Removes the temporary unencrypted snapshot archive. -/// 5. Restarts the Reth process after the snapshot is created. +/// 2. Copies the database directory into a snapshot archive. +/// 3. Starts reth back up +/// 4. Places the summit checkpoint with the reth checkpoint /// +/// This function copies the reth db and prepares it for encryption. It is split into two steps so we can keep the amount of time reth is stopped to a minimum /// After running this function, the encrypted snapshot is stored in a mounted data disk /// (separate from the OS disk) for safe backup or transfer. /// -/// # Arguments -/// * `reth_data_dir` - Path to the Reth database directory. -/// * `data_disk_dir` - Path to the mounted data disk where the encrypted snapshot will be saved. 
-/// * `snapshot_dir` - Path to a temporary directory used to hold the unencrypted snapshot archive. -/// * `snapshot_file` - Filename of the snapshot archive (e.g., `snapshot.tar.lz4`). -/// /// # Errors /// Returns an error if any step in the process (stopping Reth, compression, encryption, /// removing temporary data, or restarting Reth) fails. -pub fn prepare_encrypted_snapshot( - kp: &impl NetworkKeyProvider, +pub async fn prepare_encrypted_snapshot( epoch: u64, - reth_data_dir: &str, - data_disk_dir: &str, - snapshot_dir: &str, - snapshot_file: &str, + summit_checkpoint: Vec, ) -> Result<(), anyhow::Error> { - fs::create_dir_all(snapshot_dir) - .map_err(|e| anyhow::anyhow!("Failed to create snapshot directory: {:?}", e))?; - stop_reth()?; - compress_datadir(reth_data_dir, snapshot_dir, snapshot_file)?; - encrypt_snapshot(kp, epoch, snapshot_dir, data_disk_dir, snapshot_file)?; - fs::remove_dir_all(snapshot_dir) + let destination = PathBuf::from(SNAPSHOT_DIR).join(format!("{}-snapshot", epoch)); + + stop_reth().await?; + copy_dir_all(RETH_DATA_DIR, destination.join("reth"))?; + start_reth().await?; + + fs::write(destination.join("summit_checkpoint"), summit_checkpoint)?; + + Ok(()) +} + +/// Finished the encryption on a prepared snapshot. This is the final step of the process and reth will have been restarted by now +pub async fn finish_encrypted_snapshot(kp: &KeyManager, epoch: u64) -> Result<()> { + let snapshot_file = format!("{SNAPSHOT_FILE_PREFIX}-{epoch}.tar.lz4"); + compress_datadir( + &format!("{SNAPSHOT_DIR}/{epoch}-snapshot"), + SNAPSHOT_DIR, + &snapshot_file, + )?; + + encrypt_snapshot(kp, SNAPSHOT_DIR, DATA_DISK_DIR, &snapshot_file)?; + fs::remove_dir_all(SNAPSHOT_DIR) .map_err(|e| anyhow::anyhow!("Failed to remove snapshot directory: {:?}", e))?; - start_reth()?; Ok(()) } /// Restores the Reth database from an encrypted snapshot stored on a mounted data disk. /// /// This function performs the following steps: -/// 1. 
Stops the Reth process to allow safe restoration. +/// 1. Stops the Reth/Summit process to allow safe restoration. /// 2. Decrypts the encrypted snapshot archive using the snapshot key. /// 3. Decompresses the decrypted archive into the database directory. /// 4. Removes the temporary snapshot data after restoration. -/// 5. Restarts the Reth process with the restored database state. +/// 5. Restarts the Reth/Summit process with the restored database state. /// /// The encrypted snapshot must be available on the mounted data disk before calling this function. /// /// # Arguments -/// * `reth_data_dir` - Path to the Reth database directory where the snapshot will be restored. -/// * `data_disk_dir` - Path to the mounted data disk where the encrypted snapshot archive is located. -/// * `snapshot_dir` - Temporary directory used during the decryption and decompression steps. -/// * `snapshot_file` - Filename of the snapshot archive (e.g., `snapshot.tar.lz4`). +/// * `kp` - Path to the Reth database directory where the snapshot will be restored. +/// * `epoch` - Path to the mounted data disk where the encrypted snapshot archive is located. +/// * `encrypted_snapshot_path` - Path to the snapshot archive /// /// # Errors /// Returns an error if any step in the process (stopping Reth, decryption, decompression, /// removing temporary data, or restarting Reth) fails. 
-pub fn restore_from_encrypted_snapshot( - kp: &impl NetworkKeyProvider, +pub async fn restore_from_encrypted_snapshot( + kp: &KeyManager, epoch: u64, - reth_data_dir: &str, - data_disk_dir: &str, - snapshot_dir: &str, - snapshot_file: &str, + encrypted_snapshot_path: impl AsRef, ) -> Result<(), anyhow::Error> { - fs::create_dir_all(snapshot_dir) + fs::create_dir_all(SNAPSHOT_DIR) .map_err(|e| anyhow::anyhow!("Failed to create snapshot directory: {:?}", e))?; - stop_reth()?; - decrypt_snapshot(kp, epoch, data_disk_dir, snapshot_dir, snapshot_file)?; - decompress_datadir(reth_data_dir, snapshot_dir, snapshot_file)?; - fs::remove_dir_all(snapshot_dir) + let compressed_path = format!("{SNAPSHOT_DIR}/{epoch}-snapshot.tar.lz4"); + let uncompressed_path = PathBuf::from(format!("{SNAPSHOT_DIR}/{epoch}-snapshot")); + + // Decrypt snapshot to temp folder + decrypt_snapshot(kp, epoch, encrypted_snapshot_path, &compressed_path)?; + // decompress snapshot into temp folder + decompress_datadir(&uncompressed_path, compressed_path)?; + + // stop reth and summit + stop_reth().await?; + stop_summit().await?; + + // delete both databases + std::fs::remove_dir_all(RETH_DATA_DIR)?; + std::fs::remove_dir_all(SUMMIT_DATA_DIR)?; + + // move databases in proper location + + // Try to rename first (fastest method, works if on same filesystem) + rename_or_copy(uncompressed_path.join("reth"), RETH_DATA_DIR)?; + // start reth + start_reth().await?; + // mv summit checkpoint to proper path + fs::create_dir_all(SUMMIT_DATA_DIR) + .map_err(|e| anyhow::anyhow!("Failed to create snapshot directory: {:?}", e))?; + rename_or_copy( + uncompressed_path.join("summit_checkpoint"), + format!("{SUMMIT_DATA_DIR}/checkpoint"), + )?; + // start summit + start_summit().await?; + + fs::remove_dir_all(SNAPSHOT_DIR) .map_err(|e| anyhow::anyhow!("Failed to remove snapshot directory: {:?}", e))?; - start_reth()?; + Ok(()) } diff --git a/crates/enclave-server/src/summit.rs b/crates/enclave-server/src/summit.rs new 
file mode 100644 index 00000000..dd9c6d99 --- /dev/null +++ b/crates/enclave-server/src/summit.rs @@ -0,0 +1,93 @@ +use anyhow::Result; +use std::fs; +use tokio::{ + io::{AsyncReadExt as _, AsyncWriteExt as _}, + net::UnixListener, +}; +use tracing::{error, info}; + +use crate::{ + key_manager::KeyManager, + snapshot::{finish_encrypted_snapshot, prepare_encrypted_snapshot}, +}; + +pub async fn run_summit_socket(socket_path: String, key_manager: KeyManager) -> Result<()> { + let _ = fs::remove_file(&socket_path); + let listener = UnixListener::bind(&socket_path)?; + info!("Bound to Summit->Enclave unix socket @ {socket_path}"); + + while let Ok((mut stream, _)) = listener.accept().await { + info!("Performing backup"); + + // Read epoch number as u64 (8 bytes, little-endian) + let mut epoch_bytes = [0u8; 8]; + let epoch = match stream.read_exact(&mut epoch_bytes).await { + Ok(_) => { + let epoch = u64::from_le_bytes(epoch_bytes); + info!("Received backup ID: {}", epoch); + epoch + } + Err(e) => { + error!("Failed to read epoch over summit socket: {}", e); + if let Err(e) = stream.write_all(b"ERROR: Failed to read epoch\n").await { + error!("Failed to send error response: {}", e); + } + continue; + } + }; + + // Send acknowledgment that epoch was received + if let Err(e) = stream.write_all(b"ACK").await { + error!("Failed to send acknowledgment: {}", e); + continue; + } + + // Read the length of the checkpoint data (u32, 4 bytes, little-endian) + let mut len_bytes = [0u8; 4]; + if let Err(e) = stream.read_exact(&mut len_bytes).await { + error!("Failed to read checkpoint data length: {}", e); + continue; + } + let data_len = u32::from_le_bytes(len_bytes) as usize; + info!("Expecting {} bytes of checkpoint data", data_len); + // Read the checkpoint data + let mut checkpoint_data = vec![0u8; data_len]; + + match stream.read_exact(&mut checkpoint_data).await { + Ok(_) => { + info!( + "Received {} bytes of checkpoint data for epoch {}", + data_len, epoch + ); + } + Err(e) 
=> { + error!("Failed to read checkpoint data: {}", e); + continue; + } + } + + // Copy the database and prepare for it to be encrypted + let response = match prepare_encrypted_snapshot(epoch, checkpoint_data).await { + Ok(_) => b"ACK", + Err(e) => { + error!("Backup failed: {}", e); + b"ACK" + } + }; + + // Let summit know that it can continue and reth is restarted + if let Err(e) = stream.write_all(response).await { + error!("Failed to respond to Summit: {}", e); + } + + // Finish the encryption + if let Err(e) = finish_encrypted_snapshot(&key_manager, epoch).await { + error!("Unable to finish encrypting snapshot for epoch {epoch}: {e}"); + // at this point there is not much more we can do Summit is already continuing we will have to wait until next time a snapshot is requested to try again + } + + let _ = stream.flush().await; + } + + Ok(()) +} diff --git a/crates/enclave-server/src/utils.rs b/crates/enclave-server/src/utils.rs index df700ce7..6c15cab9 100644 --- a/crates/enclave-server/src/utils.rs +++ b/crates/enclave-server/src/utils.rs @@ -1,7 +1,22 @@ +use anyhow::{Result, anyhow}; use jsonrpsee::types::{ErrorCode, ErrorObjectOwned}; use libc::uid_t; +use std::{ + fs::{self, File}, + io::{Read as _, Write as _}, + path::Path, + process::Command, +}; use tracing::info; use tracing_subscriber::{EnvFilter, FmtSubscriber}; +#[cfg(feature = "systemctl")] +pub const RETH_CONTROL_CMD: &str = "systemctl"; + +#[cfg(not(feature = "systemctl"))] +pub const RETH_CONTROL_CMD: &str = "supervisorctl"; + +const SEISMIC_RETH_SERVICE: &str = "reth"; +const SEISMIC_SUMMIT_SERVICE: &str = "summit"; pub fn get_current_uid() -> uid_t { unsafe { libc::getuid() } @@ -11,6 +26,10 @@ pub fn anyhow_to_rpc_error(e: anyhow::Error) -> ErrorObjectOwned { ErrorObjectOwned::owned(ErrorCode::InternalError.code(), e.to_string(), None::<()>) } +pub fn string_to_rpc_error(e: String) -> ErrorObjectOwned { + ErrorObjectOwned::owned(ErrorCode::InternalError.code(), e, None::<()>) +} + /// Checks 
if the current user has root (sudo) privileges. /// /// This function runs the `id -u` command, which returns the current user's ID. @@ -51,3 +70,126 @@ pub fn init_tracing() { info!("Enclave server tracing initialized"); } + +//reads the first n bytes of a file +// useful for checking file equality +pub fn read_first_n_bytes(file_path: &str, n: usize) -> Result<Vec<u8>, anyhow::Error> { + let mut file = File::open(file_path)?; + let mut buffer = vec![0; n]; // Allocate a buffer of size `n` + let bytes_read = file.read(&mut buffer)?; + + buffer.truncate(bytes_read); // Truncate buffer in case file is smaller than `n` + Ok(buffer) +} + +// Function to generate a dummy database file +pub fn generate_dummy_file(path: &Path, size: usize) -> std::io::Result<()> { + let mut file = File::create(path)?; + file.write_all(&vec![0u8; size])?; // Fill with zero bytes + Ok(()) +} + +pub async fn start_reth() -> Result<()> { + let mut child = Command::new(RETH_CONTROL_CMD) + .arg("start") + .arg(SEISMIC_RETH_SERVICE) + .spawn() + .unwrap(); + + let status = child.wait().unwrap(); + + if status.success() { + Ok(()) + } else { + Err(anyhow!("Failed to start reth")) + } +} + +pub async fn stop_reth() -> Result<()> { + let mut child = Command::new(RETH_CONTROL_CMD) + .arg("stop") + .arg(SEISMIC_RETH_SERVICE) + .spawn() + .unwrap(); + + let status = child.wait().unwrap(); + + if status.success() { + Ok(()) + } else { + Err(anyhow!("Failed to stop reth")) + } +} + +pub async fn start_summit() -> Result<()> { + let mut child = Command::new(RETH_CONTROL_CMD) + .arg("start") + .arg(SEISMIC_SUMMIT_SERVICE) + .spawn() + .unwrap(); + + let status = child.wait().unwrap(); + + if status.success() { + Ok(()) + } else { + Err(anyhow!("Failed to start summit")) + } +} + +pub async fn stop_summit() -> Result<()> { + let mut child = Command::new(RETH_CONTROL_CMD) + .arg("stop") + .arg(SEISMIC_SUMMIT_SERVICE) + .spawn() + .unwrap(); + + let status = child.wait().unwrap(); + + if status.success() { + Ok(())
+ } else { + Err(anyhow!("Failed to stop summit")) + } +} + +pub fn copy_dir_all(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> std::io::Result<()> { + let src = src.as_ref(); + let dst = dst.as_ref(); + + // Create the destination directory + fs::create_dir_all(dst)?; + + // Iterate through the source directory + for entry in fs::read_dir(src)? { + let entry = entry?; + let file_type = entry.file_type()?; + let src_path = entry.path(); + let dst_path = dst.join(entry.file_name()); + + if file_type.is_dir() { + // Recursively copy subdirectories + copy_dir_all(&src_path, &dst_path)?; + } else { + // Copy files + fs::copy(&src_path, &dst_path)?; + } + } + + Ok(()) +} + +/// Moves a file or folder from one location to another. First attempts rename(fastest method, works if on same filesystem) then tries copy+delete +pub fn rename_or_copy(src: impl AsRef<Path>, dest: impl AsRef<Path>) -> Result<()> { + match fs::rename(&src, &dest) { + Ok(_) => Ok(()), + Err(e) if e.kind() == std::io::ErrorKind::CrossesDevices => { + // If rename fails due to crossing filesystems, copy and delete + copy_dir_all(&src, dest)?; + let _ = fs::remove_dir_all(src); // This failing isn't critical + Ok(()) + } + Err(e) => Err(e), + }?; + Ok(()) +} diff --git a/crates/enclave-server/tests/integration/utils.rs b/crates/enclave-server/tests/integration/utils.rs index 5d79b557..b2f7450e 100644 --- a/crates/enclave-server/tests/integration/utils.rs +++ b/crates/enclave-server/tests/integration/utils.rs @@ -7,6 +7,7 @@ pub fn get_args(n: u16, genesis_node: bool, peers: Vec<String>) -> Args { port, genesis_node, peers, + summit_socket: "/tmp/reth_enclave_socket.ipc".to_string(), reth_rpc_url: "0.0.0.0:8545".to_string(), mock: false, } diff --git a/crates/enclave/src/api.rs b/crates/enclave/src/api.rs index e36cfef4..1d12b30f 100644 --- a/crates/enclave/src/api.rs +++ b/crates/enclave/src/api.rs @@ -25,12 +25,24 @@ pub trait TdxQuoteRpc { async fn boot_share_root_key(&self, quote: Vec<u8>) -> RpcResult; /// Prepares an encrypted
snapshot - #[method(name = "snapshot.prepare_encrypted_snapshot")] - async fn prepare_encrypted_snapshot(&self) -> RpcResult<()>; + #[method(name = "snapshot.download_encrypted_snapshot")] + async fn download_encrypted_snapshot(&self, epoch: u64, url: String) -> RpcResult<()>; /// Restores from an encrypted snapshot #[method(name = "snapshot.restore_from_encrypted_snapshot")] - async fn restore_from_encrypted_snapshot(&self) -> RpcResult<()>; + async fn restore_from_encrypted_snapshot(&self, epoch: u64) -> RpcResult<()>; + + /// Get an encrypted snapshot from this server's database + #[method(name = "snapshot.get_encrypted_snapshot")] + async fn get_encrypted_snapshot(&self, epoch: u64) -> RpcResult<Vec<u8>>; + + /// List all encrypted snapshots stored in this enclave + #[method(name = "snapshot.list_all_encrypted_snapshots")] + async fn list_all_encrypted_snapshots(&self) -> RpcResult<Vec<u64>>; + + /// List the latest encrypted snapshot stored in this enclave + #[method(name = "snapshot.list_latest_stored_encrypted_snapshot")] + async fn list_latest_encrypted_snapshots(&self) -> RpcResult<u64>; } #[derive(Debug, Serialize, Deserialize, Clone)] diff --git a/crates/enclave/src/crypto.rs b/crates/enclave/src/crypto.rs index 171d6ab2..b81f8c3c 100644 --- a/crates/enclave/src/crypto.rs +++ b/crates/enclave/src/crypto.rs @@ -8,8 +8,8 @@ pub use schnorrkel::keys::Keypair as SchnorrkelKeypair; use schnorrkel::{ExpansionMode, MiniSecretKey}; use secp256k1::{Message, PublicKey, Secp256k1, SecretKey, ecdh::SharedSecret, ecdsa::Signature}; use sha2::{Digest, Sha256}; -use std::str::FromStr; use std::{fs, io::Read, io::Write}; +use std::{path::Path, str::FromStr}; use rand::RngCore; use serde::{Deserialize, Serialize}; @@ -300,6 +300,11 @@ pub fn encrypt_file( // Encrypt the data let ciphertext = aes_encrypt(key, &plaintext, nonce.clone()).expect("Encryption failed!"); + // Get the parent directory and create it if it doesn't exist + if let Some(parent) = Path::new(output_path).parent() { +
fs::create_dir_all(parent)?; + } + + // Save nonce + ciphertext together let mut output_file = fs::File::create(output_path) .map_err(|e| anyhow::anyhow!("Failed to create output file {}: {:?}", output_path, e))?; @@ -322,12 +327,15 @@ /// # Returns /// Returns `Ok(())` on success, or an error if reading, decryption, or writing fails. pub fn decrypt_file( - input_path: &str, - output_path: &str, + input_path: impl AsRef<Path>, + output_path: impl AsRef<Path>, key: &Key, ) -> Result<(), anyhow::Error> { + let input_path = input_path.as_ref(); + let output_path = output_path.as_ref(); + let mut file = fs::File::open(input_path) - .map_err(|e| anyhow::anyhow!("Failed to open input file {}: {:?}", input_path, e))?; + .map_err(|e| anyhow::anyhow!("Failed to open input file {:?}: {:?}", input_path, e))?; let mut file_data = Vec::new(); file.read_to_end(&mut file_data)?; @@ -344,6 +352,6 @@ let decrypted_data = aes_decrypt(key, ciphertext, nonce_bytes)?; fs::write(output_path, decrypted_data) - .map_err(|e| anyhow::anyhow!("Failed to write output file {}: {:?}", output_path, e))?; + .map_err(|e| anyhow::anyhow!("Failed to write output file {:?}: {:?}", output_path, e))?; Ok(()) } diff --git a/crates/enclave/src/mock.rs b/crates/enclave/src/mock.rs index 1df21bde..5450ab72 100644 --- a/crates/enclave/src/mock.rs +++ b/crates/enclave/src/mock.rs @@ -59,11 +59,26 @@ impl TdxQuoteRpcServer for MockServer { }) } - async fn prepare_encrypted_snapshot(&self) -> RpcResult<()> { + async fn download_encrypted_snapshot(&self, _epoch: u64, _url: String) -> RpcResult<()> { unimplemented!("prepare_encrypted_snapshot is not implemented for mock server") } - async fn restore_from_encrypted_snapshot(&self) -> RpcResult<()> { + async fn restore_from_encrypted_snapshot(&self, _epoch: u64) -> RpcResult<()> { unimplemented!("restore_encrypted_snapshot is not implemented for mock server") } + + /// Get an encrypted snapshot from this server's database + async fn
get_encrypted_snapshot(&self, _epoch: u64) -> RpcResult<Vec<u8>> { + unimplemented!("get_encrypted_snapshot is not implemented for mock server") + } + + /// List all encrypted snapshots stored in this enclave + async fn list_all_encrypted_snapshots(&self) -> RpcResult<Vec<u64>> { + unimplemented!("list_all_encrypted_snapshot is not implemented for mock server") + } + + /// List the latest encrypted snapshot stored in this enclave + async fn list_latest_encrypted_snapshots(&self) -> RpcResult<u64> { + unimplemented!("list_latest_encrypted_snapshot is not implemented for mock server") + } }