diff --git a/Cargo.lock b/Cargo.lock index c46d3e3871..4214c704cb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3718,22 +3718,30 @@ dependencies = [ name = "mc-fog-ledger-connection" version = "4.1.0-pre0" dependencies = [ + "aes-gcm", "displaydoc", + "futures", "grpcio", "mc-api", + "mc-attest-ake", + "mc-attest-core", "mc-attest-verifier", "mc-blockchain-types", "mc-common", "mc-crypto-keys", + "mc-crypto-noise", "mc-fog-api", "mc-fog-enclave-connection", "mc-fog-types", "mc-fog-uri", + "mc-rand", "mc-transaction-core", "mc-util-grpc", + "mc-util-serial", "mc-util-uri", "protobuf", "retry", + "sha2 0.10.6", ] [[package]] @@ -3798,6 +3806,7 @@ dependencies = [ "aligned-cmov", "mc-attest-core", "mc-attest-enclave-api", + "mc-blockchain-types", "mc-common", "mc-crypto-ake-enclave", "mc-crypto-keys", @@ -3812,6 +3821,7 @@ dependencies = [ "mc-transaction-core", "mc-util-serial", "mc-watcher-api", + "yare", ] [[package]] @@ -3831,13 +3841,16 @@ dependencies = [ name = "mc-fog-ledger-server" version = "4.1.0-pre0" dependencies = [ + "aes-gcm", "clap 4.1.11", "displaydoc", "futures", "grpcio", + "itertools", "lazy_static", "mc-account-keys", "mc-api", + "mc-attest-ake", "mc-attest-api", "mc-attest-core", "mc-attest-enclave-api", @@ -3858,6 +3871,7 @@ dependencies = [ "mc-fog-types", "mc-fog-uri", "mc-ledger-db", + "mc-rand", "mc-sgx-report-cache-untrusted", "mc-transaction-core", "mc-util-build-script", @@ -3874,11 +3888,14 @@ dependencies = [ "mc-util-uri", "mc-watcher", "mc-watcher-api", + "portpicker", "rand", "retry", "serde", "serde_json", + "sha2 0.10.6", "tempfile", + "tokio", "url", ] @@ -3886,6 +3903,8 @@ dependencies = [ name = "mc-fog-ledger-test-infra" version = "4.1.0-pre0" dependencies = [ + "http", + "hyper", "mc-attest-core", "mc-attest-enclave-api", "mc-blockchain-types", @@ -3897,6 +3916,8 @@ dependencies = [ "mc-ledger-db", "mc-sgx-report-cache-api", "mc-transaction-core", + "rand", + "tokio", ] [[package]] @@ -8087,6 +8108,7 @@ dependencies = [ "memchr", "mio", "num_cpus", + "parking_lot 0.12.0", "pin-project-lite", "signal-hook-registry", "socket2", diff --git a/fog/api/proto/fog_common.proto b/fog/api/proto/fog_common.proto index 6c0871a217..bfce28968a 100644 --- a/fog/api/proto/fog_common.proto +++ b/fog/api/proto/fog_common.proto @@ -13,3 +13,8 @@ message BlockRange { /// One-past-the-end of the range uint64 end_block = 2; } + +message AddShardRequest { + // The shard's URI in string format. + string shard_uri = 1; +} diff --git a/fog/api/proto/ledger.proto b/fog/api/proto/ledger.proto index de3ec99809..a671e36a93 100644 --- a/fog/api/proto/ledger.proto +++ b/fog/api/proto/ledger.proto @@ -8,6 +8,96 @@ import "fog_common.proto"; package fog_ledger; option go_package = "mobilecoin/api"; +import "google/protobuf/empty.proto"; + +//// +// Ledger router API +//// + +service LedgerAPI { + rpc Request(stream LedgerRequest) returns (stream LedgerResponse) {} +} + +service LedgerRouterAdminAPI { + // Adds a shard to the Fog Ledger Router's list of shards to query. + rpc AddShard(fog_common.AddShardRequest) returns (google.protobuf.Empty) {} +} + +/// Fulfills requests sent by the Fog Ledger Router. This is not meant to fulfill requests sent directly by the client. +service KeyImageStoreAPI { + /// This is called to perform IX key exchange with the enclave before calling GetOutputs. + rpc Auth(attest.AuthMessage) returns (attest.AuthMessage) {} + /// Input should be an encrypted MultiKeyImageStoreRequest, result is an encrypted response. 
+ rpc MultiKeyImageStoreQuery(MultiKeyImageStoreRequest) returns (MultiKeyImageStoreResponse) {} +} + +message LedgerRequest { + oneof request_data { + attest.AuthMessage auth = 1; + attest.Message check_key_images = 2; + // TODO: Fill in block query service and merkle proof service. + // Potentially untrusted_tx_out_service? To be decided. + } +} + +message LedgerResponse { + oneof response_data { + attest.AuthMessage auth = 1; + attest.Message check_key_image_response = 2; + // TODO: Fill in block query service and merkle proof service. + // Potentially untrusted_tx_out_service? To be decided. + } +} + +// Identical to FogViewStoreDecryptionError +message FogLedgerStoreDecryptionError { + /// The FogLedgerStoreUri for the specific Fog Ledger Store that + /// tried to decrypt the MultiKeyImageStoreRequest and failed. + /// The client should subsequently authenticate with the machine + /// described by this URI. + string store_uri = 1; + + /// An error message that describes the decryption error. + string error_message = 2; +} + +// Identical to MultiViewStoreQueryRequest +message MultiKeyImageStoreRequest { + /// A list of queries encrypted for Fog Ledger Stores. + repeated attest.NonceMessage queries = 1; +} + + +/// The status associated with a MultiKeyImageStoreQueryResponse +enum MultiKeyImageStoreResponseStatus { + /// Ensure default value (unfilled status) doesn't falsely appear to be a success + UNKNOWN = 0; + /// The Fog Ledger Store successfully fulfilled the request. + SUCCESS = 1; + /// The Fog Ledger Store is unable to decrypt a query within the MultiKeyImageStoreRequest. It needs to be authenticated + /// by the router. + AUTHENTICATION_ERROR = 2; + /// The Fog Ledger Store is not ready to service a MultiLedgerStoreQueryRequest. This might be because the store has + /// not loaded enough blocks yet. + NOT_READY = 3; +} + +message MultiKeyImageStoreResponse { + /// Optional field that gets set when the Fog Ledger Store is able to decrypt a query + /// included in the MultiKeyImageStoreRequest and create a query response for that + // query. This is an encrypted CheckKeyImagesResponse. + attest.NonceMessage query_response = 1; + + /// The FogLedgerStore for the specific Fog Ledger Store that + /// tried to decrypt the MultiLedgerStoreQueryRequest and failed. + /// The client should subsequently authenticate with the machine + /// described by this URI. + string store_uri = 2; + + /// Status that gets returned when the Fog Ledger Store services a MultiKeyImageStoreRequest. + MultiKeyImageStoreResponseStatus status = 3; +} + //// // Merkle proofs //// diff --git a/fog/api/proto/view.proto b/fog/api/proto/view.proto index 556af17668..b103ce8641 100644 --- a/fog/api/proto/view.proto +++ b/fog/api/proto/view.proto @@ -20,7 +20,7 @@ service FogViewRouterAPI { service FogViewRouterAdminAPI { // Adds a shard to the Fog View Router's list of shards to query. - rpc addShard(AddShardRequest) returns (google.protobuf.Empty) {} + rpc addShard(fog_common.AddShardRequest) returns (google.protobuf.Empty) {} } message AddShardRequest { diff --git a/fog/api/src/conversions.rs b/fog/api/src/conversions.rs index 6733b0be10..2e3d434ca5 100644 --- a/fog/api/src/conversions.rs +++ b/fog/api/src/conversions.rs @@ -2,7 +2,9 @@ // // Contains helper methods that enable conversions for Fog Api types. 
-use crate::{fog_common, ingest_common, view::MultiViewStoreQueryRequest}; +use crate::{ + fog_common, ingest_common, ledger::MultiKeyImageStoreRequest, view::MultiViewStoreQueryRequest, +}; use mc_api::ConversionError; use mc_attest_api::attest; use mc_attest_enclave_api::{EnclaveMessage, NonceSession}; @@ -30,6 +32,25 @@ impl From> for MultiViewStoreQueryRequest { } } +impl From>> for MultiKeyImageStoreRequest { + fn from(enclave_messages: Vec>) -> MultiKeyImageStoreRequest { + enclave_messages + .into_iter() + .map(|enclave_message| enclave_message.into()) + .collect::>() + .into() + } +} + +impl From> for MultiKeyImageStoreRequest { + fn from(attested_query_messages: Vec) -> MultiKeyImageStoreRequest { + let mut multi_key_image_store_request = MultiKeyImageStoreRequest::new(); + multi_key_image_store_request.set_queries(attested_query_messages.into()); + + multi_key_image_store_request + } +} + impl From<&common::BlockRange> for fog_common::BlockRange { fn from(common_block_range: &common::BlockRange) -> fog_common::BlockRange { let mut proto_block_range = fog_common::BlockRange::new(); diff --git a/fog/ledger/connection/Cargo.toml b/fog/ledger/connection/Cargo.toml index df3b83d5e5..d7caf04b63 100644 --- a/fog/ledger/connection/Cargo.toml +++ b/fog/ledger/connection/Cargo.toml @@ -10,12 +10,17 @@ rust-version = { workspace = true } [dependencies] # mobilecoin mc-api = { path = "../../../api" } +mc-attest-ake = { path = "../../../attest/ake" } +mc-attest-core = { path = "../../../attest/core" } mc-attest-verifier = { path = "../../../attest/verifier" } mc-blockchain-types = { path = "../../../blockchain/types" } mc-common = { path = "../../../common", features = ["log"] } mc-crypto-keys = { path = "../../../crypto/keys" } +mc-crypto-noise = { path = "../../../crypto/noise" } +mc-rand = "1.0" mc-transaction-core = { path = "../../../transaction/core" } mc-util-grpc = { path = "../../../util/grpc" } +mc-util-serial = { path = "../../../util/serial" } mc-util-uri = { path = "../../../util/uri" } # fog @@ -25,10 +30,13 @@ mc-fog-types = { path = "../../types" } mc-fog-uri = { path = "../../uri" } # third-party +aes-gcm = "0.10.1" displaydoc = { version = "0.2", default-features = false } +futures = "0.3" grpcio = "0.12.1" protobuf = "2.27.1" retry = "2.0" +sha2 = { version = "0.10", default-features = false } [dev-dependencies] mc-common = { path = "../../../common", features = ["loggers"] } diff --git a/fog/ledger/connection/src/lib.rs b/fog/ledger/connection/src/lib.rs index 0212496c1f..538c3fc7a4 100644 --- a/fog/ledger/connection/src/lib.rs +++ b/fog/ledger/connection/src/lib.rs @@ -18,3 +18,6 @@ pub use merkle_proof::{FogMerkleProofGrpcClient, OutputError, OutputResultExtens mod untrusted; pub use untrusted::FogUntrustedLedgerGrpcClient; + +mod router_client; +pub use router_client::LedgerGrpcClient; diff --git a/fog/ledger/connection/src/router_client.rs b/fog/ledger/connection/src/router_client.rs new file mode 100644 index 0000000000..9d8a64653c --- /dev/null +++ b/fog/ledger/connection/src/router_client.rs @@ -0,0 +1,259 @@ +// Copyright (c) 2018-2023 The MobileCoin Foundation + +use aes_gcm::Aes256Gcm; +use futures::{executor::block_on, SinkExt, TryStreamExt}; +use grpcio::{ChannelBuilder, ClientDuplexReceiver, ClientDuplexSender, Environment}; +use mc_attest_ake::{ + AuthResponseInput, ClientInitiate, Error as AttestAkeError, Ready, Start, Transition, +}; +use mc_attest_core::VerificationReport; +use mc_attest_verifier::Verifier; +use mc_common::logger::{log, o, Logger}; +use 
mc_crypto_keys::X25519; +use mc_crypto_noise::CipherError; +use mc_fog_api::{ + attest::{AuthMessage, Message}, + ledger::{LedgerRequest, LedgerResponse}, + ledger_grpc::LedgerApiClient, +}; +use mc_fog_types::ledger::{CheckKeyImagesRequest, CheckKeyImagesResponse, KeyImageQuery}; +use mc_fog_uri::FogLedgerUri; +use mc_rand::McRng; +use mc_transaction_core::ring_signature::KeyImage; +use mc_util_grpc::ConnectionUriGrpcioChannel; +use mc_util_serial::DecodeError; +use mc_util_uri::{ConnectionUri, UriConversionError}; +use sha2::Sha512; +use std::sync::Arc; + +/// A high-level object mediating requests to the fog ledger router service +pub struct LedgerGrpcClient { + /// A logger object + logger: Logger, + + /// The URI of the router to communicate with + uri: FogLedgerUri, + + /// An object which can verify a fog node's provided IAS report + verifier: Verifier, + + /// The AKE state machine object, if one is available. + attest_cipher: Option>, + + /// Sends requests to the fog ledger router + request_sender: ClientDuplexSender, + + /// Receives responses from the fog ledger router + response_receiver: ClientDuplexReceiver, + + /// Low-lever ledger API client + _client: LedgerApiClient, +} + +impl LedgerGrpcClient { + /// Creates a new fog ledger router grpc client and opens a streaming + /// connection to the fog ledger router service. + /// + /// Arguments: + /// * uri: The Uri to connect to + /// * verifier: The attestation verifier + /// * env: A grpc environment (thread pool) to use for this connection + /// * logger: For logging + pub fn new( + uri: FogLedgerUri, + verifier: Verifier, + env: Arc, + logger: Logger, + ) -> Self { + let logger = logger.new(o!("mc.fog.ledger.router.uri" => uri.to_string())); + + let ch = ChannelBuilder::default_channel_builder(env).connect_to_uri(&uri, &logger); + let client = LedgerApiClient::new(ch); + let (request_sender, response_receiver) = client + .request() + .expect("Could not retrieve grpc sender and receiver."); + + Self { + logger, + attest_cipher: None, + _client: client, + request_sender, + response_receiver, + uri, + verifier, + } + } + + fn is_attested(&self) -> bool { + self.attest_cipher.is_some() + } + + async fn attest(&mut self) -> Result { + // If we have an existing attestation, nuke it. + self.deattest(); + + let mut csprng = McRng::default(); + + let initiator = Start::new(self.uri.responder_id()?.to_string()); + + let init_input = ClientInitiate::::default(); + let (initiator, auth_request_output) = initiator.try_next(&mut csprng, init_input)?; + + let attested_message: AuthMessage = auth_request_output.into(); + let mut request = LedgerRequest::new(); + request.set_auth(attested_message); + self.request_sender + .send((request.clone(), grpcio::WriteFlags::default())) + .await?; + + let mut response = self + .response_receiver + .try_next() + .await? 
+ .ok_or(Error::ResponseNotReceived)?; + let auth_response_msg = response.take_auth(); + + // Process server response, check if key exchange is successful + let auth_response_event = + AuthResponseInput::new(auth_response_msg.into(), self.verifier.clone()); + let (initiator, verification_report) = + initiator.try_next(&mut csprng, auth_response_event)?; + + self.attest_cipher = Some(initiator); + + Ok(verification_report) + } + + fn deattest(&mut self) { + if self.is_attested() { + log::trace!(self.logger, "Tearing down existing attested connection."); + self.attest_cipher = None; + } + } + + /// Check one or more key images against the ledger router service + pub async fn check_key_images( + &mut self, + key_images: &[KeyImage], + ) -> Result { + log::trace!(self.logger, "Check key images was called"); + if !self.is_attested() { + let verification_report = self.attest().await; + verification_report?; + } + + let key_images_queries = key_images + .iter() + .map(|&key_image| KeyImageQuery { + key_image, + start_block: 0, + }) + .collect(); + let key_images_request = CheckKeyImagesRequest { + queries: key_images_queries, + }; + + // No authenticated data associated with ledger query + let aad = vec![]; + + let msg = { + let attest_cipher = self + .attest_cipher + .as_mut() + .expect("no enclave_connection even though attest succeeded"); + + let mut msg = Message::new(); + msg.set_channel_id(Vec::from(attest_cipher.binding())); + msg.set_aad(aad.clone()); + + let plaintext_bytes = mc_util_serial::encode(&key_images_request); + + let request_ciphertext = attest_cipher.encrypt(&aad, &plaintext_bytes)?; + msg.set_data(request_ciphertext); + msg + }; + let mut request = LedgerRequest::new(); + request.set_check_key_images(msg); + + self.request_sender + .send((request.clone(), grpcio::WriteFlags::default())) + .await?; + + let message = self + .response_receiver + .try_next() + .await? + .ok_or(Error::ResponseNotReceived)? + .take_check_key_image_response(); + + { + let attest_cipher = self + .attest_cipher + .as_mut() + .expect("no enclave_connection even though attest succeeded"); + + let plaintext_bytes = attest_cipher.decrypt(message.get_aad(), message.get_data())?; + let plaintext_response: CheckKeyImagesResponse = + mc_util_serial::decode(&plaintext_bytes)?; + Ok(plaintext_response) + } + } +} + +impl Drop for LedgerGrpcClient { + fn drop(&mut self) { + block_on(self.request_sender.close()).expect("Couldn't close the router request sender"); + } +} + +/// Errors related to the Fog View Router Client. +#[derive(Debug)] +pub enum Error { + /// Decode errors. + Decode(DecodeError), + + /// Uri conversion errors. + UriConversion(UriConversionError), + + /// Cipher errors. + Cipher(CipherError), + + /// Attestation errors. + Attestation(AttestAkeError), + + /// Grpc errors. 
+ Grpc(grpcio::Error), + + /// Response not received + ResponseNotReceived, +} + +impl From for Error { + fn from(err: DecodeError) -> Self { + Self::Decode(err) + } +} + +impl From for Error { + fn from(err: CipherError) -> Self { + Self::Cipher(err) + } +} + +impl From for Error { + fn from(err: grpcio::Error) -> Self { + Self::Grpc(err) + } +} + +impl From for Error { + fn from(err: UriConversionError) -> Self { + Self::UriConversion(err) + } +} + +impl From for Error { + fn from(err: AttestAkeError) -> Self { + Self::Attestation(err) + } +} diff --git a/fog/ledger/enclave/api/src/lib.rs b/fog/ledger/enclave/api/src/lib.rs index 94a05a77c4..bffd48b814 100644 --- a/fog/ledger/enclave/api/src/lib.rs +++ b/fog/ledger/enclave/api/src/lib.rs @@ -14,9 +14,12 @@ pub use crate::{ error::{AddRecordsError, Error}, messages::{EnclaveCall, KeyImageData}, }; -use alloc::vec::Vec; +use alloc::{collections::BTreeMap, vec::Vec}; use core::result::Result as StdResult; -use mc_attest_enclave_api::{ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage}; +use mc_attest_enclave_api::{ + ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, NonceAuthRequest, + NonceAuthResponse, NonceSession, SealedClientMessage, +}; use mc_common::ResponderId; use mc_crypto_keys::X25519Public; pub use mc_fog_types::ledger::{ @@ -94,11 +97,68 @@ pub trait LedgerEnclave: ReportableEnclave { fn check_key_images( &self, msg: EnclaveMessage, - untrusted_keyimagequery_response: UntrustedKeyImageQueryResponse, + response: UntrustedKeyImageQueryResponse, ) -> Result>; /// Add a key image data to the oram Using thrm -rf targete key image fn add_key_image_data(&self, records: Vec) -> Result<()>; + + // LEDGER ROUTER / STORE SYSTEM + + /// Begin a connection to a Fog Ledger Store. The enclave calling this + /// method, most likely a router, will act as a client to the Fog Ledger + /// Store. + fn ledger_store_init(&self, ledger_store_id: ResponderId) -> Result; + + /// Called by a ledger store server to accept an incoming connection from a + /// Fog Ledger Router instance acting as a frontend to the Ledger Store. + fn frontend_accept( + &self, + auth_request: NonceAuthRequest, + ) -> Result<(NonceAuthResponse, NonceSession)>; + + /// Complete the connection to a Fog Ledger Store that has accepted our + /// NonceAuthRequest. This is meant to be called after the enclave has + /// initialized and discovers a new Fog Ledger Store. + fn ledger_store_connect( + &self, + ledger_store_id: ResponderId, + ledger_store_auth_response: NonceAuthResponse, + ) -> Result<()>; + + /// Check to see if a particular key image is present on this key image + /// store. Used by the store server in a router/store system to respond + /// to requests from a ledger router. + fn check_key_image_store( + &self, + msg: EnclaveMessage, + response: UntrustedKeyImageQueryResponse, + ) -> Result>; + + /// Decrypts a client query message and converts it into a + /// SealedClientMessage which can be unsealed multiple times to + /// construct the MultiKeyImageStoreRequest. + fn decrypt_and_seal_query( + &self, + client_query: EnclaveMessage, + ) -> Result; + + /// Transforms a client query request into a list of query request data. + /// + /// The returned list is meant to be used to construct the + /// MultiKeyImageStoreRequest, which is sent to each shard. 
+ fn create_multi_key_image_store_query_data( + &self, + sealed_query: SealedClientMessage, + ) -> Result>>; + + /// Receives all of the shards' query responses and collates them into one + /// query response for the client. + fn collate_shard_query_responses( + &self, + sealed_query: SealedClientMessage, + shard_query_responses: BTreeMap>, + ) -> Result>; } /// Helper trait which reduces boiler-plate in untrusted side diff --git a/fog/ledger/enclave/api/src/messages.rs b/fog/ledger/enclave/api/src/messages.rs index f79bfee004..70e459348b 100644 --- a/fog/ledger/enclave/api/src/messages.rs +++ b/fog/ledger/enclave/api/src/messages.rs @@ -2,9 +2,12 @@ //! The message types used by the ledger_enclave_api. use crate::UntrustedKeyImageQueryResponse; -use alloc::vec::Vec; +use alloc::{collections::BTreeMap, vec::Vec}; use mc_attest_core::{Quote, Report, TargetInfo, VerificationReport}; -use mc_attest_enclave_api::{ClientAuthRequest, ClientSession, EnclaveMessage}; +use mc_attest_enclave_api::{ + ClientAuthRequest, ClientSession, EnclaveMessage, NonceAuthRequest, NonceAuthResponse, + NonceSession, SealedClientMessage, +}; use mc_common::ResponderId; use mc_fog_types::ledger::GetOutputsResponse; use mc_transaction_core::ring_signature::KeyImage; @@ -34,7 +37,7 @@ pub struct KeyImageData { /// An enumeration of API calls and their arguments for use across serialization /// boundaries. -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +#[derive(Clone, Debug, Deserialize, Serialize)] pub enum EnclaveCall { /// The [LedgerEnclave::enclave_init()] method. EnclaveInit(ResponderId, u64), @@ -101,4 +104,50 @@ pub enum EnclaveCall { /// /// Add key image data to the ORAM. AddKeyImageData(Vec), + + /// The [LedgerEnclave::ledger_store_init()] method. + /// + /// Begin a connection to a Fog Ledger Store. The enclave calling this + /// method, most likely a router, will act as a client to the Fog Ledger + /// Store. + LedgerStoreInit(ResponderId), + + /// The [LedgerEnclave::ledger_store_connect()] method. + /// + /// Complete the connection to a Fog Ledger Store that has accepted our + /// ClientAuthRequest. This is meant to be called after the enclave has + /// initialized and discovers a new Fog Ledger Store. + LedgerStoreConnect(ResponderId, NonceAuthResponse), + + /// The [LedgerEnclave::decrypt_and_seal_query()] method. + /// + /// Takes a client query message and returns a SealedClientMessage + /// sealed for the current enclave. + DecryptAndSealQuery(EnclaveMessage), + + /// The [LedgerEnclave::create_multi_key_image_store_query()] method. + /// + /// Transforms a client query request into a list of query request data. + /// + /// The returned list is meant to be used to construct the + /// MultiKeyImageStoreRequest, which is sent to each shard. + CreateMultiKeyImageStoreQueryData(SealedClientMessage), + + /// Collates shard query responses into a single query response for the + /// client. + CollateQueryResponses( + SealedClientMessage, + BTreeMap>, + ), + + /// The [LedgerEnclave::client_check_key_image_store()] method. + /// Store-side Ledger/Router system equivalent to + /// [EnclaveCall::CheckKeyImages] Start a new key image check from a + /// client. + CheckKeyImageStore(EnclaveMessage, UntrustedKeyImageQueryResponse), + + /// The [LedgerEnclave::FrontendAccept()] method. + /// Called by a Store accepting a Router's incoming + /// connection. 
+ FrontendAccept(NonceAuthRequest), } diff --git a/fog/ledger/enclave/impl/Cargo.toml b/fog/ledger/enclave/impl/Cargo.toml index 5521b929b3..bef9cd145b 100644 --- a/fog/ledger/enclave/impl/Cargo.toml +++ b/fog/ledger/enclave/impl/Cargo.toml @@ -12,6 +12,7 @@ rust-version = { workspace = true } # mobilecoin mc-attest-core = { path = "../../../../attest/core", default-features = false } mc-attest-enclave-api = { path = "../../../../attest/enclave-api", default-features = false } +mc-blockchain-types = { path = "../../../../blockchain/types" } mc-common = { path = "../../../../common", default-features = false } mc-crypto-ake-enclave = { path = "../../../../crypto/ake/enclave", default-features = false } mc-crypto-keys = { path = "../../../../crypto/keys", default-features = false } @@ -31,3 +32,7 @@ mc-oblivious-traits = "2.3" # fog mc-fog-ledger-enclave-api = { path = "../api", default-features = false } mc-fog-types = { path = "../../../types" } + +[dev-dependencies] +mc-common = { path = "../../../../common", features = ["loggers"] } +yare = "1.0.2" diff --git a/fog/ledger/enclave/impl/src/lib.rs b/fog/ledger/enclave/impl/src/lib.rs index d37562d7bf..e5c85450d4 100644 --- a/fog/ledger/enclave/impl/src/lib.rs +++ b/fog/ledger/enclave/impl/src/lib.rs @@ -13,10 +13,15 @@ extern crate alloc; mod key_image_store; -use alloc::vec::Vec; +use alloc::{collections::BTreeMap, vec::Vec}; +use core::cmp::max; use key_image_store::{KeyImageStore, StorageDataSize, StorageMetaSize}; use mc_attest_core::{IasNonce, Quote, QuoteNonce, Report, TargetInfo, VerificationReport}; -use mc_attest_enclave_api::{ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage}; +use mc_attest_enclave_api::{ + ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, NonceAuthRequest, + NonceAuthResponse, NonceSession, SealedClientMessage, +}; +use mc_blockchain_types::MAX_BLOCK_VERSION; use mc_common::{ logger::{log, Logger}, ResponderId, @@ -24,7 +29,8 @@ use mc_common::{ use mc_crypto_ake_enclave::{AkeEnclaveState, NullIdentity}; use mc_crypto_keys::X25519Public; use mc_fog_ledger_enclave_api::{ - Error, KeyImageData, LedgerEnclave, OutputContext, Result, UntrustedKeyImageQueryResponse, + Error, KeyImageData, KeyImageResult, LedgerEnclave, OutputContext, Result, + UntrustedKeyImageQueryResponse, }; use mc_fog_types::ledger::{ CheckKeyImagesRequest, CheckKeyImagesResponse, GetOutputsRequest, GetOutputsResponse, @@ -33,6 +39,8 @@ use mc_oblivious_traits::ORAMStorageCreator; use mc_sgx_compat::sync::Mutex; use mc_sgx_report_cache_api::{ReportableEnclave, Result as ReportableEnclaveResult}; +mod oblivious_utils; + /// In-enclave state associated to the ledger enclaves pub struct SgxLedgerEnclave where @@ -191,6 +199,155 @@ where Ok(()) } + + fn ledger_store_init(&self, ledger_store_id: ResponderId) -> Result { + Ok(self.ake.backend_init(ledger_store_id)?) + } + + fn ledger_store_connect( + &self, + ledger_store_id: ResponderId, + ledger_store_auth_response: NonceAuthResponse, + ) -> Result<()> { + Ok(self + .ake + .backend_connect(ledger_store_id, ledger_store_auth_response)?) + } + + fn decrypt_and_seal_query( + &self, + client_query: EnclaveMessage, + ) -> Result { + Ok(self.ake.decrypt_client_message_for_enclave(client_query)?) + } + + fn create_multi_key_image_store_query_data( + &self, + sealed_query: SealedClientMessage, + ) -> Result>> { + Ok(self + .ake + .reencrypt_sealed_message_for_backends(&sealed_query)?) 
+ } + + fn collate_shard_query_responses( + &self, + sealed_query: SealedClientMessage, + shard_query_responses: BTreeMap>, + ) -> Result> { + if shard_query_responses.is_empty() { + return Ok(EnclaveMessage::default()); + } + let channel_id = sealed_query.channel_id.clone(); + let client_query_plaintext = self.ake.unseal(&sealed_query)?; + let client_query_request: CheckKeyImagesRequest = + mc_util_serial::decode(&client_query_plaintext).map_err(|e| { + log::error!(self.logger, "Could not decode client query request: {}", e); + Error::ProstDecode + })?; + + let shard_query_responses = shard_query_responses + .into_iter() + .map(|(responder_id, query_response)| { + let plaintext_bytes = self.ake.backend_decrypt(&responder_id, &query_response)?; + let query_response: CheckKeyImagesResponse = + mc_util_serial::decode(&plaintext_bytes)?; + + Ok(query_response) + }) + .collect::>>()?; + + let num_blocks = shard_query_responses + .iter() + .map(|query_response| query_response.num_blocks) + .min() + .expect("this is only None when the iterator is empty but we early-exit in that case"); + let global_txo_count = shard_query_responses + .iter() + .map(|query_response| query_response.global_txo_count) + .min() + .expect("this is only None when the iterator is empty but we early-exit in that case"); + let latest_block_version = shard_query_responses + .iter() + .map(|query_response| query_response.latest_block_version) + .max() + .expect("this is only None when the iterator is empty but we early-exit in that case"); + + let plaintext_results: Vec = shard_query_responses + .into_iter() + .flat_map(|query_response| query_response.results) + .collect(); + + let oblivious_results = oblivious_utils::collate_shard_key_image_search_results( + client_query_request.queries, + &plaintext_results, + ); + + let max_block_version = max(latest_block_version, *MAX_BLOCK_VERSION); + + let client_query_response = CheckKeyImagesResponse { + num_blocks, + global_txo_count, + results: oblivious_results, + latest_block_version, + max_block_version, + }; + let response_plaintext_bytes = mc_util_serial::encode(&client_query_response); + let response = + self.ake + .client_encrypt(&channel_id, &sealed_query.aad, &response_plaintext_bytes)?; + + Ok(response) + } + + fn check_key_image_store( + &self, + msg: EnclaveMessage, + untrusted_key_image_query_response: UntrustedKeyImageQueryResponse, + ) -> Result> { + let channel_id = msg.channel_id.clone(); + let user_plaintext = self.ake.frontend_decrypt(msg)?; + + let req: CheckKeyImagesRequest = mc_util_serial::decode(&user_plaintext).map_err(|e| { + log::error!(self.logger, "Could not decode user request: {}", e); + Error::ProstDecode + })?; + + let mut resp = CheckKeyImagesResponse { + num_blocks: untrusted_key_image_query_response.highest_processed_block_count, + results: Default::default(), + global_txo_count: untrusted_key_image_query_response + .last_known_block_cumulative_txo_count, + latest_block_version: untrusted_key_image_query_response.latest_block_version, + max_block_version: untrusted_key_image_query_response.max_block_version, + }; + + { + let mut lk = self.key_image_store.lock()?; + let store = lk.as_mut().ok_or(Error::EnclaveNotInitialized)?; + + resp.results = req + .queries + .iter() // get the key images used to find the key image data using the oram + .map(|key| store.find_record(&key.key_image)) + .collect(); + } + + // Encrypt for return to router + let response_plaintext_bytes = mc_util_serial::encode(&resp); + let response = self + .ake + 
.frontend_encrypt(&channel_id, &[], &response_plaintext_bytes)?; + + Ok(response) + } + + fn frontend_accept( + &self, + auth_request: NonceAuthRequest, + ) -> Result<(NonceAuthResponse, NonceSession)> { + Ok(self.ake.frontend_accept(auth_request)?) + } } #[cfg(test)] diff --git a/fog/ledger/enclave/impl/src/oblivious_utils.rs b/fog/ledger/enclave/impl/src/oblivious_utils.rs new file mode 100644 index 0000000000..eafdf3c44d --- /dev/null +++ b/fog/ledger/enclave/impl/src/oblivious_utils.rs @@ -0,0 +1,272 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! Contains methods that allow a Fog View Router enclave to combine all of the +//! Fog View Shard's query responses into one query response that'll be returned +//! for the client. + +use aligned_cmov::{ + subtle::{Choice, ConstantTimeEq}, + CMov, +}; +use alloc::vec::Vec; +use mc_fog_types::ledger::{KeyImageQuery, KeyImageResult, KeyImageResultCode}; +use mc_transaction_core::ring_signature::KeyImage; +use mc_watcher_api::TimestampResultCode; + +/// The default KeyImageResultCode used when collating the shard responses. +const DEFAULT_KEY_IMAGE_SEARCH_RESULT_CODE: KeyImageResultCode = KeyImageResultCode::NotSpent; + +fn default_client_key_image(key_image: KeyImage) -> KeyImageResult { + KeyImageResult { + key_image, + spent_at: 1, // not 0 because it's defined to be >0 in the .proto file + timestamp: u64::MAX, + timestamp_result_code: TimestampResultCode::TimestampFound as u32, + key_image_result_code: DEFAULT_KEY_IMAGE_SEARCH_RESULT_CODE as u32, + } +} + +pub fn collate_shard_key_image_search_results( + client_queries: Vec, + shard_key_image_search_results: &[KeyImageResult], +) -> Vec { + let mut client_key_image_search_results: Vec = client_queries + .iter() + .map(|client_query| default_client_key_image(client_query.key_image)) + .collect(); + + for shard_key_image_search_result in shard_key_image_search_results.iter() { + for client_key_image_search_result in client_key_image_search_results.iter_mut() { + maybe_overwrite_key_image_search_result( + client_key_image_search_result, + shard_key_image_search_result, + ); + } + } + + client_key_image_search_results +} + +fn maybe_overwrite_key_image_search_result( + client_key_image_search_result: &mut KeyImageResult, + shard_key_image_search_result: &KeyImageResult, +) { + let should_overwrite_key_image_search_result = should_overwrite_key_image_search_result( + client_key_image_search_result, + shard_key_image_search_result, + ); + + client_key_image_search_result.key_image_result_code.cmov( + should_overwrite_key_image_search_result, + &shard_key_image_search_result.key_image_result_code, + ); + + client_key_image_search_result.spent_at.cmov( + should_overwrite_key_image_search_result, + &shard_key_image_search_result.spent_at, + ); + + client_key_image_search_result.timestamp.cmov( + should_overwrite_key_image_search_result, + &shard_key_image_search_result.timestamp, + ); + + client_key_image_search_result.timestamp_result_code.cmov( + should_overwrite_key_image_search_result, + &shard_key_image_search_result.timestamp_result_code, + ); +} + +fn should_overwrite_key_image_search_result( + client_key_image_search_result: &KeyImageResult, + shard_key_image_search_result: &KeyImageResult, +) -> Choice { + let client_key_image: &[u8] = client_key_image_search_result.key_image.as_ref(); + let shard_key_image: &[u8] = shard_key_image_search_result.key_image.as_ref(); + let key_images_match = client_key_image.ct_eq(shard_key_image); + + let client_key_image_search_result_code = 
client_key_image_search_result.key_image_result_code; + let shard_key_image_search_result_code = shard_key_image_search_result.key_image_result_code; + + let client_code_is_default: Choice = + client_key_image_search_result_code.ct_eq(&(DEFAULT_KEY_IMAGE_SEARCH_RESULT_CODE as u32)); + + let shard_code_is_spent: Choice = + shard_key_image_search_result_code.ct_eq(&(KeyImageResultCode::Spent as u32)); + let shard_code_is_error: Choice = + shard_key_image_search_result_code.ct_eq(&(KeyImageResultCode::KeyImageError as u32)); + + // We make the same query to several shards and get several responses, and + // this logic determines how we fill the one client response. + // First, we only update the client response if the shard's key image for this + // result matches the key image for the client result we're considering + // updating. At a high level, we want to prioritize "spent" responses. + // Error responses are "retriable" errors that the client will retry + // after a backoff. The "not spent" response is the default response and + // gets overwritten by any other response. + // "Overwrite key image search result if the key images match and either the + // shard's response is Spent or there is a new KeyImageError result" + let new_error = shard_code_is_error & client_code_is_default; + let should_update = shard_code_is_spent | new_error; + key_images_match & should_update +} + +#[cfg(test)] +mod tests { + extern crate std; + + use super::*; + use std::vec; + use yare::parameterized; + + #[test] + fn differing_key_images_do_not_update() { + let client_result = default_client_key_image(1.into()); + let shard_result = default_client_key_image(2.into()); + let result: bool = + should_overwrite_key_image_search_result(&client_result, &shard_result).into(); + assert!(!result); + } + + #[parameterized( + client_default_shard_is_spent = { KeyImageResultCode::NotSpent, KeyImageResultCode::Spent}, + client_default_shard_is_error = { KeyImageResultCode::NotSpent, KeyImageResultCode::KeyImageError}, + client_error_shard_is_spent = { KeyImageResultCode::KeyImageError, KeyImageResultCode::Spent}, + client_spent_shard_is_spent = { KeyImageResultCode::Spent, KeyImageResultCode::Spent}, + )] + fn should_update(client_code: KeyImageResultCode, shard_code: KeyImageResultCode) { + let mut client_result = default_client_key_image(1.into()); + let mut shard_result = client_result.clone(); + client_result.key_image_result_code = client_code as u32; + shard_result.key_image_result_code = shard_code as u32; + let result: bool = + should_overwrite_key_image_search_result(&client_result, &shard_result).into(); + assert!(result); + } + + #[parameterized( + client_default_shard_is_not_spent = { KeyImageResultCode::NotSpent, KeyImageResultCode::NotSpent}, + client_error_shard_is_not_spent = { KeyImageResultCode::KeyImageError, KeyImageResultCode::NotSpent}, + client_error_shard_is_error = { KeyImageResultCode::KeyImageError, KeyImageResultCode::KeyImageError}, + client_spent_shard_is_not_spent = { KeyImageResultCode::Spent, KeyImageResultCode::NotSpent}, + client_spent_shard_is_error = { KeyImageResultCode::Spent, KeyImageResultCode::KeyImageError}, + )] + fn should_not_update(client_code: KeyImageResultCode, shard_code: KeyImageResultCode) { + let mut client_result = default_client_key_image(1.into()); + let mut shard_result = client_result.clone(); + client_result.key_image_result_code = client_code as u32; + shard_result.key_image_result_code = shard_code as u32; + let result: bool = + 
should_overwrite_key_image_search_result(&client_result, &shard_result).into(); + assert!(!result); + } + + #[test] + fn all_available() { + let range = 1..=3; + let client_queries = range + .clone() + .map(|key_image| KeyImageQuery { + key_image: key_image.into(), + start_block: 0, + }) + .collect::>(); + let mut shard_results = range + .map(|key_image| KeyImageResult { + key_image: key_image.into(), + spent_at: key_image + 1, + timestamp: key_image + 10, + timestamp_result_code: TimestampResultCode::WatcherBehind as u32, + key_image_result_code: KeyImageResultCode::Spent as u32, + }) + .collect::>(); + let mut results = collate_shard_key_image_search_results(client_queries, &shard_results); + results.sort_by_key(|r| r.key_image); + shard_results.sort_by_key(|r| r.key_image); + assert_eq!(results, shard_results); + } + + #[test] + fn duplicate_shard_results_returns_one_result() { + let client_queries = vec![KeyImageQuery { + key_image: 1.into(), + start_block: 0, + }]; + let key_image_result = KeyImageResult { + key_image: 1.into(), + spent_at: 2, + timestamp: 3, + timestamp_result_code: TimestampResultCode::WatcherBehind as u32, + key_image_result_code: KeyImageResultCode::Spent as u32, + }; + let shard_results = vec![key_image_result.clone(), key_image_result.clone()]; + let mut results = collate_shard_key_image_search_results(client_queries, &shard_results); + results.sort_by_key(|r| r.key_image); + assert_eq!(results, vec![key_image_result]); + } + + #[test] + fn none_available() { + let client_queries = vec![KeyImageQuery { + key_image: 1.into(), + start_block: 0, + }]; + + let shard_results = vec![]; + + let result = collate_shard_key_image_search_results(client_queries, &shard_results); + + assert_eq!(result, vec![default_client_key_image(1.into())]); + } + + #[test] + fn error_result() { + let client_queries = vec![KeyImageQuery { + key_image: 1.into(), + start_block: 0, + }]; + + let key_image_result = KeyImageResult { + key_image: 1.into(), + spent_at: 1, + timestamp: 123, + timestamp_result_code: TimestampResultCode::TimestampFound as u32, + key_image_result_code: KeyImageResultCode::KeyImageError as u32, + }; + let shard_results = vec![key_image_result.clone()]; + + let results = collate_shard_key_image_search_results(client_queries, &shard_results); + + assert_eq!(results, vec![key_image_result]); + } + + #[test] + fn partial_responses() { + let client_queries = vec![ + KeyImageQuery { + key_image: 1.into(), + start_block: 0, + }, + KeyImageQuery { + key_image: 2.into(), + start_block: 0, + }, + ]; + + let key_image_result = KeyImageResult { + key_image: 1.into(), + spent_at: 1, + timestamp: 123, + timestamp_result_code: TimestampResultCode::TimestampFound as u32, + key_image_result_code: KeyImageResultCode::Spent as u32, + }; + let shard_results = vec![key_image_result.clone()]; + + let mut results = collate_shard_key_image_search_results(client_queries, &shard_results); + results.sort_by_key(|r| r.key_image); + assert_eq!( + results, + vec![key_image_result, default_client_key_image(2.into())] + ); + } +} diff --git a/fog/ledger/enclave/src/lib.rs b/fog/ledger/enclave/src/lib.rs index bfab86078b..b60fa58929 100644 --- a/fog/ledger/enclave/src/lib.rs +++ b/fog/ledger/enclave/src/lib.rs @@ -14,7 +14,10 @@ pub use mc_fog_ledger_enclave_api::{ use mc_attest_core::{ IasNonce, Quote, QuoteNonce, Report, SgxError, TargetInfo, VerificationReport, }; -use mc_attest_enclave_api::{ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage}; +use mc_attest_enclave_api::{ + 
ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, NonceAuthRequest, + NonceAuthResponse, NonceSession, SealedClientMessage, +}; use mc_attest_verifier::DEBUG_ENCLAVE; use mc_common::{logger::Logger, ResponderId}; use mc_crypto_keys::X25519Public; @@ -25,7 +28,7 @@ use mc_sgx_types::{ sgx_attributes_t, sgx_enclave_id_t, sgx_launch_token_t, sgx_misc_attribute_t, sgx_status_t, }; use mc_sgx_urts::SgxEnclave; -use std::{path, result::Result as StdResult, sync::Arc}; +use std::{collections::BTreeMap, path, result::Result as StdResult, sync::Arc}; /// The default filename of the fog ledger's SGX enclave binary. pub const ENCLAVE_FILE: &str = "libledger-enclave.signed.so"; @@ -171,12 +174,9 @@ impl LedgerEnclave for LedgerSgxEnclave { fn check_key_images( &self, msg: EnclaveMessage, - untrusted_keyimagequery_response: UntrustedKeyImageQueryResponse, + response: UntrustedKeyImageQueryResponse, ) -> Result> { - let inbuf = mc_util_serial::serialize(&EnclaveCall::CheckKeyImages( - msg, - untrusted_keyimagequery_response, - ))?; + let inbuf = mc_util_serial::serialize(&EnclaveCall::CheckKeyImages(msg, response))?; let outbuf = self.enclave_call(&inbuf)?; mc_util_serial::deserialize(&outbuf[..])? } @@ -187,6 +187,78 @@ impl LedgerEnclave for LedgerSgxEnclave { let outbuf = self.enclave_call(&inbuf)?; mc_util_serial::deserialize(&outbuf[..])? } + + fn ledger_store_init(&self, ledger_store_id: ResponderId) -> Result { + let inbuf = mc_util_serial::serialize(&EnclaveCall::LedgerStoreInit(ledger_store_id))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn ledger_store_connect( + &self, + ledger_store_id: ResponderId, + ledger_store_auth_response: NonceAuthResponse, + ) -> Result<()> { + let inbuf = mc_util_serial::serialize(&EnclaveCall::LedgerStoreConnect( + ledger_store_id, + ledger_store_auth_response, + ))?; + + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn decrypt_and_seal_query( + &self, + client_query: EnclaveMessage, + ) -> Result { + let inbuf = mc_util_serial::serialize(&EnclaveCall::DecryptAndSealQuery(client_query))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn create_multi_key_image_store_query_data( + &self, + sealed_query: SealedClientMessage, + ) -> Result>> { + let inbuf = mc_util_serial::serialize(&EnclaveCall::CreateMultiKeyImageStoreQueryData( + sealed_query, + ))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn collate_shard_query_responses( + &self, + sealed_query: SealedClientMessage, + shard_query_responses: BTreeMap>, + ) -> Result> { + let inbuf = mc_util_serial::serialize(&EnclaveCall::CollateQueryResponses( + sealed_query, + shard_query_responses, + ))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn check_key_image_store( + &self, + msg: EnclaveMessage, + response: UntrustedKeyImageQueryResponse, + ) -> Result> { + let inbuf = mc_util_serial::serialize(&EnclaveCall::CheckKeyImageStore(msg, response))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? + } + + fn frontend_accept( + &self, + auth_request: NonceAuthRequest, + ) -> Result<(NonceAuthResponse, NonceSession)> { + let inbuf = mc_util_serial::serialize(&EnclaveCall::FrontendAccept(auth_request))?; + let outbuf = self.enclave_call(&inbuf)?; + mc_util_serial::deserialize(&outbuf[..])? 
+ } } extern "C" { diff --git a/fog/ledger/enclave/trusted/Cargo.lock b/fog/ledger/enclave/trusted/Cargo.lock index b3ee516ef3..4cd6bc7b4b 100644 --- a/fog/ledger/enclave/trusted/Cargo.lock +++ b/fog/ledger/enclave/trusted/Cargo.lock @@ -826,6 +826,29 @@ dependencies = [ "serde", ] +[[package]] +name = "mc-blockchain-types" +version = "4.1.0-pre0" +dependencies = [ + "displaydoc", + "hex_fmt", + "mc-account-keys", + "mc-attest-verifier-types", + "mc-common", + "mc-consensus-scp-types", + "mc-crypto-digestible", + "mc-crypto-digestible-signature", + "mc-crypto-keys", + "mc-crypto-ring-signature", + "mc-transaction-core", + "mc-transaction-types", + "mc-util-from-random", + "mc-util-repr-bytes", + "prost", + "serde", + "zeroize", +] + [[package]] name = "mc-common" version = "4.1.0-pre0" @@ -847,6 +870,18 @@ dependencies = [ "slog", ] +[[package]] +name = "mc-consensus-scp-types" +version = "4.1.0-pre0" +dependencies = [ + "mc-common", + "mc-crypto-digestible", + "mc-crypto-keys", + "mc-util-from-random", + "prost", + "serde", +] + [[package]] name = "mc-core" version = "4.1.0-pre0" @@ -1107,6 +1142,7 @@ dependencies = [ "aligned-cmov", "mc-attest-core", "mc-attest-enclave-api", + "mc-blockchain-types", "mc-common", "mc-crypto-ake-enclave", "mc-crypto-keys", diff --git a/fog/ledger/enclave/trusted/src/lib.rs b/fog/ledger/enclave/trusted/src/lib.rs index ad6177a079..f61ecc34fd 100644 --- a/fog/ledger/enclave/trusted/src/lib.rs +++ b/fog/ledger/enclave/trusted/src/lib.rs @@ -56,11 +56,35 @@ pub fn ecall_dispatcher(inbuf: &[u8]) -> Result, sgx_status_t> { serialize(&ENCLAVE.get_outputs_data(resp, client)) } // Check Key image - EnclaveCall::CheckKeyImages(req, untrusted_keyimagequery_response) => { - serialize(&ENCLAVE.check_key_images(req, untrusted_keyimagequery_response)) + EnclaveCall::CheckKeyImages(req, response) => { + serialize(&ENCLAVE.check_key_images(req, response)) } // Add Key Image Data EnclaveCall::AddKeyImageData(records) => serialize(&ENCLAVE.add_key_image_data(records)), + + // Router / Store system + // Router-side + EnclaveCall::LedgerStoreInit(responder_id) => { + serialize(&ENCLAVE.ledger_store_init(responder_id)) + } + EnclaveCall::LedgerStoreConnect(responder_id, client_auth_response) => { + serialize(&ENCLAVE.ledger_store_connect(responder_id, client_auth_response)) + } + EnclaveCall::DecryptAndSealQuery(client_query) => { + serialize(&ENCLAVE.decrypt_and_seal_query(client_query)) + } + EnclaveCall::CreateMultiKeyImageStoreQueryData(msg) => { + serialize(&ENCLAVE.create_multi_key_image_store_query_data(msg)) + } + EnclaveCall::CollateQueryResponses(sealed_query, shard_query_responses) => { + serialize(&ENCLAVE.collate_shard_query_responses(sealed_query, shard_query_responses)) + } + EnclaveCall::CheckKeyImageStore(req, response) => { + serialize(&ENCLAVE.check_key_image_store(req, response)) + } + EnclaveCall::FrontendAccept(auth_message) => { + serialize(&ENCLAVE.frontend_accept(auth_message)) + } } .or(Err(sgx_status_t::SGX_ERROR_UNEXPECTED)) } diff --git a/fog/ledger/server/Cargo.toml b/fog/ledger/server/Cargo.toml index 0c4fbe3abb..605e9666fc 100644 --- a/fog/ledger/server/Cargo.toml +++ b/fog/ledger/server/Cargo.toml @@ -12,8 +12,12 @@ name = "mc_fog_ledger_server" path = "src/lib.rs" [[bin]] -name = "ledger_server" -path = "src/bin/main.rs" +name = "ledger_router" +path = "src/bin/router.rs" + +[[bin]] +name = "key_image_store" +path = "src/bin/key_image_store.rs" [dependencies] mc-attest-api = { path = "../../../attest/api" } @@ -52,6 +56,7 @@ clap = { version = 
"4.1", features = ["derive", "env"] } displaydoc = { version = "0.2", default-features = false } futures = "0.3" grpcio = "0.12.1" +itertools = "0.10" lazy_static = "1.4" rand = "0.8" retry = "2.0" @@ -64,12 +69,15 @@ mc-util-build-script = { path = "../../../util/build/script" } mc-util-build-sgx = { path = "../../../util/build/sgx" } [dev-dependencies] +aes-gcm = "0.10.1" # mobilecoin mc-account-keys = { path = "../../../account-keys" } mc-api = { path = "../../../api" } +mc-attest-ake = { path = "../../../attest/ake" } mc-blockchain-test-utils = { path = "../../../blockchain/test-utils" } mc-common = { path = "../../../common", features = ["loggers"] } mc-crypto-keys = { path = "../../../crypto/keys" } +mc-rand = "1.0" mc-util-test-helper = { path = "../../../util/test-helper" } mc-util-uri = { path = "../../../util/uri" } @@ -78,6 +86,9 @@ mc-fog-ledger-connection = { path = "../connection" } mc-fog-ledger-enclave-measurement = { path = "../enclave/measurement" } mc-fog-ledger-test-infra = { path = "../test_infra" } mc-fog-test-infra = { path = "../../test_infra" } +portpicker = "0.1.1" +sha2 = "0.10" # third-party tempfile = "3.4" +tokio = { version = "1.0", features = ["full"] } diff --git a/fog/ledger/server/src/bin/main.rs b/fog/ledger/server/src/bin/key_image_store.rs similarity index 64% rename from fog/ledger/server/src/bin/main.rs rename to fog/ledger/server/src/bin/key_image_store.rs index e87402a819..e0f99ffe8c 100644 --- a/fog/ledger/server/src/bin/main.rs +++ b/fog/ledger/server/src/bin/key_image_store.rs @@ -1,37 +1,22 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation -#![deny(missing_docs)] - -//! Ledger Server target +use clap::Parser; use grpcio::{RpcStatus, RpcStatusCode}; use mc_attest_net::{Client, RaClient}; -use mc_common::{ - logger::{create_app_logger, log, o}, - time::SystemTimeProvider, -}; +use mc_common::{logger::log, time::SystemTimeProvider}; use mc_fog_ledger_enclave::{LedgerSgxEnclave, ENCLAVE_FILE}; -use mc_fog_ledger_server::{LedgerServer, LedgerServerConfig}; +use mc_fog_ledger_server::{KeyImageStoreServer, LedgerStoreConfig, ShardingStrategy}; use mc_ledger_db::LedgerDB; -use mc_util_cli::ParserWithBuildInfo; use mc_util_grpc::AdminServer; use mc_watcher::watcher_db::WatcherDB; + use std::{env, sync::Arc}; fn main() { - let _sentry_guard = mc_common::sentry::init(); - let (logger, _global_logger_guard) = create_app_logger(o!()); + let (logger, _global_logger_guard) = + mc_common::logger::create_app_logger(mc_common::logger::o!()); mc_common::setup_panic_handler(); - - let config = LedgerServerConfig::parse(); - - let _tracer = mc_util_telemetry::setup_default_tracer_with_tags( - env!("CARGO_PKG_NAME"), - &[( - "client_responser_id", - config.client_responder_id.to_string(), - )], - ) - .expect("Failed setting telemetry tracer"); + let config = LedgerStoreConfig::parse(); let enclave_path = env::current_exe() .expect("Could not get the path of our executable") @@ -39,7 +24,9 @@ fn main() { log::info!( logger, "enclave path {}, responder ID {}", - enclave_path.to_str().expect("Could not get enclave path"), + enclave_path + .to_str() + .expect("enclave path is not valid UTF-8"), &config.client_responder_id ); let enclave = LedgerSgxEnclave::new( @@ -49,22 +36,28 @@ fn main() { logger.clone(), ); + //Get our ledger connection started. 
let db = LedgerDB::open(&config.ledger_db).expect("Could not read ledger DB"); let watcher = WatcherDB::open_ro(&config.watcher_db, logger.clone()).expect("Could not open watcher DB"); + let ias_client = Client::new(&config.ias_api_key).expect("Could not create IAS client"); - let mut server = LedgerServer::new( - config.clone(), - enclave, - db, - watcher, - ias_client, - SystemTimeProvider::default(), - logger.clone(), - ); - server.start().expect("Server failed to start"); + let mut store_server = match config.sharding_strategy.clone() { + ShardingStrategy::Epoch(sharding_strategy) => KeyImageStoreServer::new_from_config( + config.clone(), + enclave, + ias_client, + db, + watcher, + sharding_strategy, + SystemTimeProvider::default(), + logger.clone(), + ), + }; + store_server.start(); + //Initialize the admin api let config2 = config.clone(); let get_config_json = Arc::new(move || { serde_json::to_string(&config2) diff --git a/fog/ledger/server/src/bin/router.rs b/fog/ledger/server/src/bin/router.rs new file mode 100644 index 0000000000..8189093d73 --- /dev/null +++ b/fog/ledger/server/src/bin/router.rs @@ -0,0 +1,63 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use std::env; + +use clap::Parser; +use mc_attest_net::{Client, RaClient}; +use mc_common::logger::log; +use mc_fog_ledger_enclave::{LedgerSgxEnclave, ENCLAVE_FILE}; +use mc_fog_ledger_server::{LedgerRouterConfig, LedgerRouterServer}; +use mc_ledger_db::LedgerDB; +use mc_watcher::watcher_db::WatcherDB; + +fn main() { + let (logger, _global_logger_guard) = + mc_common::logger::create_app_logger(mc_common::logger::o!()); + mc_common::setup_panic_handler(); + let config = LedgerRouterConfig::parse(); + + let enclave_path = env::current_exe() + .expect("Could not get the path of our executable") + .with_file_name(ENCLAVE_FILE); + + if let Some(enclave_path_str) = enclave_path.to_str() { + log::info!( + logger, + "enclave path {}, responder ID {}", + enclave_path_str, + &config.client_responder_id + ); + } else { + log::info!( + logger, + "enclave path {:?}, responder ID {}", + enclave_path, + &config.client_responder_id + ); + log::warn!( + logger, + "enclave path {:?} is not valid Unicode!", + enclave_path + ); + } + + let enclave = LedgerSgxEnclave::new( + enclave_path, + &config.client_responder_id, + config.omap_capacity, + logger.clone(), + ); + + let ledger_db = LedgerDB::open(&config.ledger_db).expect("Could not read ledger DB"); + let watcher_db = + WatcherDB::open_ro(&config.watcher_db, logger.clone()).expect("Could not open watcher DB"); + + let ias_client = Client::new(&config.ias_api_key).expect("Could not create IAS client"); + let mut router_server = + LedgerRouterServer::new(config, enclave, ias_client, ledger_db, watcher_db, logger); + router_server.start(); + + loop { + std::thread::sleep(std::time::Duration::from_millis(1000)); + } +} diff --git a/fog/ledger/server/src/config.rs b/fog/ledger/server/src/config.rs index 273e087fd6..78ad2dc2ec 100644 --- a/fog/ledger/server/src/config.rs +++ b/fog/ledger/server/src/config.rs @@ -4,27 +4,67 @@ #![deny(missing_docs)] +use crate::sharding_strategy::EpochShardingStrategy; use clap::Parser; use mc_attest_core::ProviderId; use mc_common::ResponderId; -use mc_fog_uri::FogLedgerUri; +use mc_fog_uri::{FogLedgerUri, KeyImageStoreUri}; use mc_util_parse::parse_duration_in_seconds; use mc_util_uri::AdminUri; use serde::Serialize; -use std::{path::PathBuf, time::Duration}; +use std::{path::PathBuf, str::FromStr, time::Duration}; -/// Configuration parameters for the 
ledger server +/// Configuration parameters for the Fog Ledger Router service. #[derive(Clone, Parser, Serialize)] #[clap(version)] -pub struct LedgerServerConfig { +pub struct LedgerRouterConfig { /// The chain id of the network we are a part of #[clap(long, env = "MC_CHAIN_ID")] pub chain_id: String, + /// The ID with which to respond to client attestation requests. + /// + /// This ID needs to match the host:port clients use in their URI when + /// referencing this node. + #[clap(long, env = "MC_CLIENT_RESPONDER_ID")] + pub client_responder_id: ResponderId, + + /// PEM-formatted keypair to send with an Attestation Request. + #[clap(long, env = "MC_IAS_API_KEY")] + pub ias_api_key: String, + + /// The IAS SPID to use when getting a quote + #[clap(long, env = "MC_IAS_SPID")] + pub ias_spid: ProviderId, + /// gRPC listening URI for client requests. #[clap(long, env = "MC_CLIENT_LISTEN_URI")] pub client_listen_uri: FogLedgerUri, + /// gRPC listening URIs for preconfigured Key Image Stores. + #[clap(long, use_value_delimiter = true, env = "MC_KEY_IMAGE_SHARD_URIS")] + pub shard_uris: Vec, + + /// Router admin listening URI. + #[clap(long, env = "MC_ADMIN_LISTEN_URI")] + pub admin_listen_uri: AdminUri, + + /// Number of query attempts with no forward progress + /// before reporting an error. + #[clap(long, default_value = "3")] + pub query_retries: usize, + + /// Enables authenticating client requests using Authorization tokens using + /// the provided hex-encoded 32 bytes shared secret. + #[clap(long, value_parser = mc_util_parse::parse_hex::<[u8; 32]>, env = "MC_CLIENT_AUTH_TOKEN_SECRET")] + pub client_auth_token_secret: Option<[u8; 32]>, + + /// Maximal client authentication token lifetime, in seconds (only relevant + /// when --client-auth-token-secret is used. Defaults to 86400 - 24 + /// hours). + #[clap(long, default_value = "86400", value_parser = parse_duration_in_seconds, env = "MC_CLIENT_AUTH_TOKEN_MAX_LIFETIME")] + pub client_auth_token_max_lifetime: Duration, + /// Path to ledger db (lmdb) #[clap(long, env = "MC_LEDGER_DB")] pub ledger_db: PathBuf, @@ -33,13 +73,48 @@ pub struct LedgerServerConfig { #[clap(long, env = "MC_WATCHER_DB")] pub watcher_db: PathBuf, - /// Client Responder id. + // TODO: Add store instance uris which are of type Vec. + /// The capacity to build the OMAP (ORAM hash table) with. + /// About 75% of this capacity can be used. + /// The hash table will overflow when there are more TxOut's than this, + /// and the server will have to be restarted with a larger number. + /// + /// Note: At time of writing, the hash table will be allocated to use all + /// available SGX EPC memory, and then beyond that it will be allocated on + /// the heap in the untrusted side. Once the needed capacity exceeds RAM, + /// you will either get killed by OOM killer, or it will start being swapped + /// to disk by linux kernel. + #[clap(long, default_value = "1048576", env = "MC_OMAP_CAPACITY")] + pub omap_capacity: u64, +} + +/// Configuration parameters for the Fog Ledger Store service. +#[derive(Clone, Parser, Serialize)] +#[clap(version)] +pub struct LedgerStoreConfig { + /// The chain id of the network we are a part of + #[clap(long, env = "MC_CHAIN_ID")] + pub chain_id: String, + + /// The ID with which to respond to client attestation requests. /// /// This ID needs to match the host:port clients use in their URI when /// referencing this node. #[clap(long, env = "MC_CLIENT_RESPONDER_ID")] pub client_responder_id: ResponderId, + /// gRPC listening URI for client requests. 
+ #[clap(long, env = "MC_CLIENT_LISTEN_URI")] + pub client_listen_uri: KeyImageStoreUri, + + /// Path to ledger db (lmdb) + #[clap(long, value_parser(clap::value_parser!(PathBuf)), env = "MC_LEDGER_DB")] + pub ledger_db: PathBuf, + + /// Path to watcher db (lmdb) - includes block timestamps + #[clap(long, value_parser(clap::value_parser!(PathBuf)), env = "MC_WATCHER_DB")] + pub watcher_db: PathBuf, + /// IAS Api Key. #[clap(long, env = "MC_IAS_API_KEY")] pub ias_api_key: String, @@ -75,4 +150,31 @@ pub struct LedgerServerConfig { /// to disk by linux kernel. #[clap(long, default_value = "1048576", env = "MC_OMAP_CAPACITY")] pub omap_capacity: u64, + + /// Determines which group of Key Images the Key Image Store instance will + /// process. + #[clap(long, default_value = "default", env = "MC_SHARDING_STRATEGY")] + pub sharding_strategy: ShardingStrategy, +} + +/// Enum for parsing strategy from command line w/ clap +#[derive(Clone, Serialize)] +pub enum ShardingStrategy { + /// Epoch strategy (continuous block range) + Epoch(EpochShardingStrategy), +} + +impl FromStr for ShardingStrategy { + type Err = String; + + fn from_str(s: &str) -> Result { + if s.eq("default") { + return Ok(ShardingStrategy::Epoch(EpochShardingStrategy::default())); + } + if let Ok(epoch_sharding_strategy) = EpochShardingStrategy::from_str(s) { + return Ok(ShardingStrategy::Epoch(epoch_sharding_strategy)); + } + + Err("Invalid sharding strategy config.".to_string()) + } } diff --git a/fog/ledger/server/src/db_fetcher.rs b/fog/ledger/server/src/db_fetcher.rs index 53d032ba10..a9fcb56267 100644 --- a/fog/ledger/server/src/db_fetcher.rs +++ b/fog/ledger/server/src/db_fetcher.rs @@ -3,13 +3,14 @@ //! A background thread, in the server side, that continuously checks the //! LedgerDB for new blocks, then gets all the key images associated to those //! blocks and adds them to the enclave. -use crate::{counters, server::DbPollSharedState}; +use crate::{counters, sharding_strategy::ShardingStrategy, DbPollSharedState}; use mc_common::{ logger::{log, Logger}, trace_time, }; use mc_fog_ledger_enclave::LedgerEnclaveProxy; use mc_fog_ledger_enclave_api::KeyImageData; +use mc_fog_types::common::BlockRange; use mc_ledger_db::{self, Error as LedgerError, Ledger}; use mc_util_grpc::ReadinessIndicator; use mc_util_telemetry::{ @@ -18,6 +19,7 @@ use mc_util_telemetry::{ use mc_watcher::watcher_db::WatcherDB; use retry::{delay, retry, OperationResult}; use std::{ + cmp::min, sync::{ atomic::{AtomicBool, Ordering}, Arc, Mutex, @@ -29,8 +31,18 @@ use std::{ /// Telemetry: block index currently being worked on. const TELEMETRY_BLOCK_INDEX_KEY: Key = telemetry_static_key!("block-index"); +/// The number of unloaded available blocks which causes the DbFetcher to be +/// marked unready +const BLOCKS_BEHIND: u64 = 100; + /// An object for managing background data fetches from the ledger database. -pub struct DbFetcher { +pub struct DbFetcher< + DB: Ledger + 'static, + E: LedgerEnclaveProxy + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Send + Sync + 'static, +> { + /// Struct representing the thread and its context. + thread: Option>, /// Join handle used to wait for the thread to terminate. 
join_handle: Option>, @@ -38,10 +50,16 @@ pub struct DbFetcher { stop_requested: Arc, } -impl DbFetcher { - pub fn new( +impl< + DB: Ledger + 'static, + E: LedgerEnclaveProxy + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Send + Sync + 'static, + > DbFetcher +{ + pub fn new( db: DB, enclave: E, + sharding_strategy: SS, watcher: WatcherDB, db_poll_shared_state: Arc>, readiness_indicator: ReadinessIndicator, @@ -50,30 +68,38 @@ impl DbFetcher { let stop_requested = Arc::new(AtomicBool::new(false)); let thread_stop_requested = stop_requested.clone(); let thread_shared_state = db_poll_shared_state; - let join_handle = Some( - ThreadBuilder::new() - .name("LedgerDbFetcher".to_owned()) - .spawn(move || { - DbFetcherThread::start( - db, - thread_stop_requested, - 0, - enclave, - watcher, - thread_shared_state, - readiness_indicator, - logger, - ) - }) - .expect("Could not spawn thread"), - ); + let thread = Some(DbFetcherThread::new( + db, + thread_stop_requested, + sharding_strategy, + enclave, + watcher, + thread_shared_state, + readiness_indicator, + logger, + )); Self { - join_handle, + thread, + join_handle: None, stop_requested, } } + /// Start running the DbFetcher thread. + pub fn start(&mut self) { + let thread = self + .thread + .take() + .expect("No DbFetcher thread to attempt to spawn"); + self.join_handle = Some( + ThreadBuilder::new() + .name("LedgerDbFetcher".to_owned()) + .spawn(move || thread.run()) + .expect("Could not spawn thread"), + ); + } + /// Stop and join the db poll thread pub fn stop(&mut self) -> Result<(), ()> { if let Some(join_handle) = self.join_handle.take() { @@ -85,16 +111,25 @@ impl DbFetcher { } } -impl Drop for DbFetcher { +impl< + DB: Ledger + 'static, + E: LedgerEnclaveProxy + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Send + Sync + 'static, + > Drop for DbFetcher +{ fn drop(&mut self) { let _ = self.stop(); } } -struct DbFetcherThread { +struct DbFetcherThread< + DB: Ledger, + E: LedgerEnclaveProxy + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Send + Sync + 'static, +> { db: DB, stop_requested: Arc, - next_block_index: u64, + sharding_strategy: SS, enclave: E, watcher: WatcherDB, db_poll_shared_state: Arc>, @@ -104,36 +139,41 @@ struct DbFetcherThread DbFetcherThread { +impl< + DB: Ledger, + E: LedgerEnclaveProxy + Clone + Send + Sync + 'static, + SS: ShardingStrategy + Send + Sync + 'static, + > DbFetcherThread +{ const POLLING_FREQUENCY: Duration = Duration::from_millis(10); const ERROR_RETRY_FREQUENCY: Duration = Duration::from_millis(1000); - pub fn start( + pub fn new( db: DB, stop_requested: Arc, - next_block_index: u64, + sharding_strategy: SS, enclave: E, watcher: WatcherDB, db_poll_shared_state: Arc>, readiness_indicator: ReadinessIndicator, logger: Logger, - ) { - let thread = Self { + ) -> Self { + Self { db, stop_requested, - next_block_index, + sharding_strategy, enclave, watcher, db_poll_shared_state, readiness_indicator, logger, - }; - thread.run(); + } } - fn run(mut self) { + pub fn run(mut self) { log::info!(self.logger, "Db fetcher thread started."); - self.next_block_index = 0; + let block_range = self.sharding_strategy.get_block_range(); + let mut next_block_index = 0; loop { if self.stop_requested.load(Ordering::SeqCst) { log::info!(self.logger, "Db fetcher thread stop requested."); @@ -144,11 +184,16 @@ impl DbFetche // invocation. 
We want to keep loading blocks as long as we have data to load, // but that could take some time which is why the loop is also gated // on the stop trigger in case a stop is requested during loading. - while self.load_block_data() && !self.stop_requested.load(Ordering::SeqCst) { + while self.load_block_data(&mut next_block_index, &block_range) + && !self.stop_requested.load(Ordering::SeqCst) + { // Hack: If we notice that we are way behind the ledger, set ourselves unready match self.db.num_blocks() { Ok(num_blocks) => { - if num_blocks > self.next_block_index + 100 { + // if there are > BLOCKS_BEHIND *available* blocks we haven't loaded yet, + // set unready + if min(num_blocks, block_range.end_block) > next_block_index + BLOCKS_BEHIND + { self.readiness_indicator.set_unready(); } } @@ -175,20 +220,20 @@ impl DbFetche /// Attempt to load the next block that we /// are aware of and tracking. /// Returns true if we might have more block data to load. - fn load_block_data(&mut self) -> bool { + fn load_block_data(&mut self, next_block_index: &mut u64, block_range: &BlockRange) -> bool { // Default to true: if there is an error, we may have more work, we don't know let mut may_have_more_work = true; let watcher_timeout: Duration = Duration::from_millis(5000); let start_time = SystemTime::now(); - match self.db.get_block_contents(self.next_block_index) { + match self.db.get_block_contents(*next_block_index) { Err(LedgerError::NotFound) => may_have_more_work = false, Err(e) => { log::error!( self.logger, "Unexpected error when checking for block data {}: {:?}", - self.next_block_index, + next_block_index, e ); std::thread::sleep(Self::ERROR_RETRY_FREQUENCY); @@ -197,34 +242,37 @@ impl DbFetche // Tracing let tracer = tracer!(); - let mut span = block_span_builder(&tracer, "poll_block", self.next_block_index) + let mut span = block_span_builder(&tracer, "poll_block", *next_block_index) .with_start_time(start_time) .start(&tracer); - span.set_attribute(TELEMETRY_BLOCK_INDEX_KEY.i64(self.next_block_index as i64)); + span.set_attribute(TELEMETRY_BLOCK_INDEX_KEY.i64(*next_block_index as i64)); let _active = mark_span_as_active(span); - // Get the timestamp for the block. - let timestamp = tracer.in_span("poll_block_timestamp", |_cx| { - self.watcher - .poll_block_timestamp(self.next_block_index, watcher_timeout) - }); + // Only add blocks within the epoch to the ORAM + if block_range.contains(*next_block_index) { + // Get the timestamp for the block. + let timestamp = tracer.in_span("poll_block_timestamp", |_cx| { + self.watcher + .poll_block_timestamp(*next_block_index, watcher_timeout) + }); - // Add block to enclave. - let records = block_contents - .key_images - .iter() - .map(|key_image| KeyImageData { - key_image: *key_image, - block_index: self.next_block_index, - timestamp, - }) - .collect(); - - tracer.in_span("add_records_to_enclave", |_cx| { - self.add_records_to_enclave(self.next_block_index, records); - }); + // Add block to enclave. + let records = block_contents + .key_images + .iter() + .map(|key_image| KeyImageData { + key_image: *key_image, + block_index: *next_block_index, + timestamp, + }) + .collect(); + + tracer.in_span("add_records_to_enclave", |_cx| { + self.add_records_to_enclave(*next_block_index, records); + }); + } // Update shared state. 
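// --- Illustrative aside (editor's sketch, not part of the patch) ---
// A minimal, self-contained sketch of the lag heuristic above. BlockRange here is a
// simplified stand-in for the fog BlockRange type (end_block treated as one-past-the-end).
// Clamping the ledger size to the epoch end means a shard is not marked unready just
// because the ledger keeps growing past the blocks it is responsible for.
use std::cmp::min;

const BLOCKS_BEHIND: u64 = 100;

/// Simplified stand-in for the fog BlockRange type.
struct BlockRange {
    start_block: u64,
    /// One past the last block in the range.
    end_block: u64,
}

/// True when more than BLOCKS_BEHIND loadable blocks remain, i.e. the store should
/// report itself unready.
fn is_lagging(num_blocks_in_ledger: u64, epoch: &BlockRange, next_block_index: u64) -> bool {
    min(num_blocks_in_ledger, epoch.end_block) > next_block_index + BLOCKS_BEHIND
}

fn main() {
    let epoch = BlockRange { start_block: 0, end_block: 1_000 };
    println!("epoch starts at block {}", epoch.start_block);
    // The ledger has 500 blocks and we have only loaded 10 of them: unready.
    assert!(is_lagging(500, &epoch, 10));
    // The ledger has grown to 5_000 blocks, but our epoch ends at 1_000 and we have
    // loaded 950: within BLOCKS_BEHIND of everything we own, so we stay ready.
    assert!(!is_lagging(5_000, &epoch, 950));
    println!("lag heuristic behaves as expected");
}
// --- end of aside ---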
tracer.in_span("update_shared_state", |_cx| { @@ -232,13 +280,13 @@ impl DbFetche self.db_poll_shared_state.lock().expect("mutex poisoned"); // this is next_block_index + 1 because next_block_index is actually the block // we just processed, so we have fully processed next_block_index + 1 blocks - shared_state.highest_processed_block_count = self.next_block_index + 1; + shared_state.highest_processed_block_count = *next_block_index + 1; match self.db.num_txos() { Err(e) => { log::error!( self.logger, "Unexpected error when checking for ledger num txos {}: {:?}", - self.next_block_index, + next_block_index, e ); } @@ -252,7 +300,7 @@ impl DbFetche log::error!( self.logger, "Unexpected error when checking for ledger latest block version {}: {:?}", - self.next_block_index, + next_block_index, e ); } @@ -262,7 +310,7 @@ impl DbFetche } }); - self.next_block_index += 1; + *next_block_index += 1; } } may_have_more_work diff --git a/fog/ledger/server/src/error.rs b/fog/ledger/server/src/error.rs new file mode 100644 index 0000000000..6e95988a9b --- /dev/null +++ b/fog/ledger/server/src/error.rs @@ -0,0 +1,79 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use displaydoc::Display; +use grpcio::RpcStatus; +use mc_common::logger::Logger; +use mc_fog_ledger_enclave_api::Error as LedgerEnclaveError; +use mc_sgx_report_cache_untrusted::Error as ReportCacheError; +use mc_util_grpc::{rpc_internal_error, rpc_permissions_error}; + +#[derive(Debug, Display)] +pub enum RouterServerError { + /// Error related to contacting Fog Ledger Store: {0} + LedgerStoreError(String), + /// Ledger Enclave error: {0} + Enclave(LedgerEnclaveError), +} + +impl From for RouterServerError { + fn from(src: grpcio::Error) -> Self { + RouterServerError::LedgerStoreError(format!("{src}")) + } +} + +impl From for RouterServerError { + fn from(src: mc_common::ResponderIdParseError) -> Self { + RouterServerError::LedgerStoreError(format!("{src}")) + } +} + +impl From for RouterServerError { + fn from(src: mc_util_uri::UriParseError) -> Self { + RouterServerError::LedgerStoreError(format!("{src}")) + } +} + +impl From for RouterServerError { + fn from(src: mc_util_uri::UriConversionError) -> Self { + RouterServerError::LedgerStoreError(format!("{src}")) + } +} + +pub fn router_server_err_to_rpc_status( + context: &str, + src: RouterServerError, + logger: Logger, +) -> RpcStatus { + match src { + RouterServerError::LedgerStoreError(_) => { + rpc_internal_error(context, format!("{src}"), &logger) + } + RouterServerError::Enclave(_) => rpc_permissions_error(context, format!("{src}"), &logger), + } +} + +impl From for RouterServerError { + fn from(src: LedgerEnclaveError) -> Self { + RouterServerError::Enclave(src) + } +} + +#[derive(Display)] +pub enum LedgerServerError { + /// Ledger Enclave error: {0} + Enclave(LedgerEnclaveError), + /// Report cache error: {0} + ReportCache(ReportCacheError), +} + +impl From for LedgerServerError { + fn from(src: LedgerEnclaveError) -> Self { + LedgerServerError::Enclave(src) + } +} + +impl From for LedgerServerError { + fn from(src: ReportCacheError) -> Self { + Self::ReportCache(src) + } +} diff --git a/fog/ledger/server/src/key_image_service.rs b/fog/ledger/server/src/key_image_service.rs index 6959ae1348..64f2b9f757 100644 --- a/fog/ledger/server/src/key_image_service.rs +++ b/fog/ledger/server/src/key_image_service.rs @@ -1,26 +1,30 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation -use crate::{server::DbPollSharedState, SVC_COUNTERS}; -use grpcio::{RpcContext, RpcStatus, 
UnarySink}; -use mc_attest_api::{ - attest, - attest::{AuthMessage, Message}, -}; +use crate::{DbPollSharedState, SVC_COUNTERS}; +use grpcio::RpcStatus; +use mc_attest_api::{attest, attest::AuthMessage}; use mc_blockchain_types::MAX_BLOCK_VERSION; use mc_common::logger::{log, Logger}; -use mc_fog_api::ledger_grpc::FogKeyImageApi; +use mc_fog_api::{ + ledger::{ + MultiKeyImageStoreRequest, MultiKeyImageStoreResponse, MultiKeyImageStoreResponseStatus, + }, + ledger_grpc::KeyImageStoreApi, +}; use mc_fog_ledger_enclave::LedgerEnclaveProxy; use mc_fog_ledger_enclave_api::{Error as EnclaveError, UntrustedKeyImageQueryResponse}; +use mc_fog_uri::{ConnectionUri, KeyImageStoreUri}; use mc_ledger_db::Ledger; use mc_util_grpc::{ - check_request_chain_id, rpc_internal_error, rpc_invalid_arg_error, rpc_logger, - rpc_permissions_error, send_result, Authenticator, + rpc_internal_error, rpc_invalid_arg_error, rpc_logger, rpc_permissions_error, send_result, + Authenticator, }; use mc_watcher::watcher_db::WatcherDB; use std::sync::{Arc, Mutex}; #[derive(Clone)] pub struct KeyImageService { - chain_id: String, + /// The ClientListenUri for this Fog Ledger Service. + client_listen_uri: KeyImageStoreUri, ledger: L, watcher: WatcherDB, enclave: E, @@ -32,7 +36,7 @@ pub struct KeyImageService { impl KeyImageService { pub fn new( - chain_id: String, + client_listen_uri: KeyImageStoreUri, ledger: L, watcher: WatcherDB, enclave: E, @@ -41,7 +45,7 @@ impl KeyImageService { logger: Logger, ) -> Self { Self { - chain_id, + client_listen_uri, ledger, watcher, enclave, @@ -63,18 +67,68 @@ impl KeyImageService { self.db_poll_shared_state.clone() } - /// Unwrap and forward to enclave - // self.enclave.check_key_images should take both an AttestMessage and an - // UntrustedKeyImageQueryResponse object that contains any data that is - // needed that isn't in the ORAM. This might be like "num_blocks" and similar - // stuff. self.enclave.check_key_images should return an AttestMessage that - // we send back to the user. - fn check_key_images_auth( + pub fn auth_store( &mut self, - request: attest::Message, - ) -> Result { - log::trace!(self.logger, "Getting encrypted request"); + mut req: AuthMessage, + logger: &Logger, + ) -> Result { + // TODO: Use the prost message directly, once available + match self.enclave.frontend_accept(req.take_data().into()) { + Ok((response, _)) => { + let mut result = attest::AuthMessage::new(); + result.set_data(response.into()); + Ok(result) + } + Err(client_error) => { + // There's no requirement on the remote party to trigger this, so it's debug. + log::debug!( + logger, + "KeyImageStoreApi::frontend_accept failed: {}", + client_error + ); + let rpc_permissions_error = rpc_permissions_error( + "auth_store", + format!("Permission denied: {client_error}"), + logger, + ); + Err(rpc_permissions_error) + } + } + } + + pub fn auth_service( + &mut self, + mut req: AuthMessage, + logger: &Logger, + ) -> Result { + // TODO: Use the prost message directly, once available + match self.enclave.client_accept(req.take_data().into()) { + Ok((response, _)) => { + let mut result = attest::AuthMessage::new(); + result.set_data(response.into()); + Ok(result) + } + Err(client_error) => { + // There's no requirement on the remote party to trigger this, so it's debug. 
+ log::debug!( + logger, + "KeyImageStoreApi::client_accept failed: {}", + client_error + ); + let rpc_permissions_error = rpc_permissions_error( + "client_auth", + format!("Permission denied: {client_error}"), + logger, + ); + Err(rpc_permissions_error) + } + } + } + /// Generate an UntrustedKeyImageQueryResponse + /// for use in [KeyImageService::check_key_images_auth()] + /// and [KeyImageService::check_key_image_store_auth()] + fn prepare_untrusted_query(&mut self) -> UntrustedKeyImageQueryResponse { let ( highest_processed_block_count, last_known_block_cumulative_txo_count, @@ -88,21 +142,34 @@ impl KeyImageService { ) }; - let untrusted_query_response = UntrustedKeyImageQueryResponse { + UntrustedKeyImageQueryResponse { highest_processed_block_count, last_known_block_cumulative_txo_count, latest_block_version, max_block_version: latest_block_version.max(*MAX_BLOCK_VERSION), - }; + } + } - let result_blob = self + /// Unwrap and forward to enclave + // self.enclave.check_key_images should take both a NonceMessage and an + // UntrustedKeyImageQueryResponse object that contains any data that is + // needed that isn't in the ORAM. This might be like "num_blocks" and similar + // stuff. self.enclave.check_key_images should return an AttestMessage that + // we send back to the user. + fn check_key_image_store_auth( + &mut self, + request: attest::NonceMessage, + ) -> Result { + log::trace!(self.logger, "Getting encrypted request"); + + let untrusted_query_response = self.prepare_untrusted_query(); + + let response = self .enclave - .check_key_images(request.into(), untrusted_query_response) + .check_key_image_store(request.into(), untrusted_query_response) .map_err(|e| self.enclave_err_to_rpc_status("enclave request", e))?; - let mut resp = attest::Message::new(); - resp.set_data(result_blob); - Ok(resp) + Ok(response.into()) } // Helper function that is common @@ -118,61 +185,86 @@ impl KeyImageService { other => rpc_internal_error(context, format!("{}", &other), &self.logger), } } -} -impl FogKeyImageApi for KeyImageService { - fn check_key_images(&mut self, ctx: RpcContext, request: Message, sink: UnarySink) { - let _timer = SVC_COUNTERS.req(&ctx); - mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { - if let Err(err) = check_request_chain_id(&self.chain_id, &ctx) { - return send_result(ctx, sink, Err(err), logger); - } + /// Handle MultiKeyImageStoreRequest contents sent by a router to this + /// store. + fn process_queries( + &mut self, + fog_ledger_store_uri: KeyImageStoreUri, + queries: Vec, + ) -> MultiKeyImageStoreResponse { + let mut response = MultiKeyImageStoreResponse::new(); + // The router needs our own URI, in case auth fails / hasn't been started yet. + response.set_store_uri(fog_ledger_store_uri.url().to_string()); - if let Err(err) = self.authenticator.authenticate_rpc(&ctx) { - return send_result(ctx, sink, err.into(), logger); + for query in queries.into_iter() { + // Only one of the query messages in the multi-store query is intended for this + // store. It's a bit of a broadcast model - all queries are sent to + // all stores, and then the stores evaluate which message is meant + // for them. + if let Ok(attested_message) = self.check_key_image_store_auth(query) { + response.set_query_response(attested_message); + response.set_status(MultiKeyImageStoreResponseStatus::SUCCESS); + //Note that set_fog_ledger_store_uri has been taken care of above. 
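// --- Illustrative aside (editor's sketch, not part of the patch) ---
// The broadcast model described above, reduced to a self-contained sketch. Query
// decryption is faked with a string prefix; in the real service the enclave tries to
// decrypt each attested NonceMessage and only the store holding the matching session
// succeeds. Only the control flow mirrors process_queries(): answer the first
// decryptable query with SUCCESS and our own URI, otherwise report AUTHENTICATION_ERROR
// so the router re-authenticates with us.
#[derive(Debug, PartialEq)]
enum Status {
    Success,
    AuthenticationError,
}

struct Response {
    store_uri: String,
    query_response: Option<String>,
    status: Status,
}

/// Placeholder for "the enclave decrypts a query that was encrypted for this store".
fn try_decrypt(store_key: &str, query: &str) -> Option<String> {
    query
        .strip_prefix(store_key)
        .map(|body| format!("response-to:{body}"))
}

fn process_queries(store_uri: &str, store_key: &str, queries: &[String]) -> Response {
    // All stores receive all queries; answer the first one addressed to us.
    for query in queries {
        if let Some(query_response) = try_decrypt(store_key, query) {
            return Response {
                store_uri: store_uri.to_owned(),
                query_response: Some(query_response),
                status: Status::Success,
            };
        }
    }
    // No query was decryptable: ask the router to (re)authenticate with this store.
    Response {
        store_uri: store_uri.to_owned(),
        query_response: None,
        status: Status::AuthenticationError,
    }
}

fn main() {
    // Two encrypted queries were broadcast; only the second is addressed to store "a".
    let queries = vec!["store-b:q1".to_owned(), "store-a:q1".to_owned()];

    let hit = process_queries("key-image-store-a.fog.test:3225", "store-a:", &queries);
    assert_eq!(hit.status, Status::Success);
    println!("{} answered with {:?}", hit.store_uri, hit.query_response);

    let miss = process_queries("key-image-store-c.fog.test:3225", "store-c:", &queries);
    assert_eq!(miss.status, Status::AuthenticationError);
}
// --- end of aside ---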
+ + return response; } + } - send_result(ctx, sink, self.check_key_images_auth(request), logger) - }) + // TODO: different response code for "none found matching" from "authentication + // error," potentially? + + response.set_status(MultiKeyImageStoreResponseStatus::AUTHENTICATION_ERROR); + response } +} - fn auth(&mut self, ctx: RpcContext, request: AuthMessage, sink: UnarySink) { +impl KeyImageStoreApi for KeyImageService { + fn auth( + &mut self, + ctx: grpcio::RpcContext, + req: AuthMessage, + sink: grpcio::UnarySink, + ) { let _timer = SVC_COUNTERS.req(&ctx); mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { - if let Err(err) = check_request_chain_id(&self.chain_id, &ctx) { - return send_result(ctx, sink, Err(err), logger); - } - if let Err(err) = self.authenticator.authenticate_rpc(&ctx) { return send_result(ctx, sink, err.into(), logger); } - // TODO: Use the prost message directly, once available - match self.enclave.client_accept(request.into()) { - Ok((response, _session_id)) => { - send_result(ctx, sink, Ok(response.into()), logger); + match self.auth_store(req, logger) { + Ok(response) => { + send_result(ctx, sink, Ok(response), logger); } Err(client_error) => { // This is debug because there's no requirement on the remote party to trigger // it. log::info!( logger, - "LedgerEnclave::client_accept failed: {}", + "LedgerEnclave::frontend_accept failed: {}", client_error ); // TODO: increment failed inbound peering counter. - send_result( - ctx, - sink, - Err(rpc_permissions_error( - "client_auth", - "Permission denied", - logger, - )), - logger, - ); + send_result(ctx, sink, Err(client_error), logger); } } }); } + + fn multi_key_image_store_query( + &mut self, + ctx: grpcio::RpcContext, + req: MultiKeyImageStoreRequest, + sink: grpcio::UnarySink, + ) { + let _timer = SVC_COUNTERS.req(&ctx); + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + if let Err(err) = self.authenticator.authenticate_rpc(&ctx) { + return send_result(ctx, sink, err.into(), logger); + } + let response = + self.process_queries(self.client_listen_uri.clone(), req.queries.into_vec()); + send_result(ctx, sink, Ok(response), logger) + }); + } } diff --git a/fog/ledger/server/src/key_image_store_server.rs b/fog/ledger/server/src/key_image_store_server.rs new file mode 100644 index 0000000000..2e0032141b --- /dev/null +++ b/fog/ledger/server/src/key_image_store_server.rs @@ -0,0 +1,228 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use std::sync::{Arc, Mutex}; + +use futures::executor::block_on; +use mc_attest_core::ProviderId; +use mc_attest_net::RaClient; +use mc_common::{ + logger::{log, Logger}, + time::TimeProvider, +}; +use mc_fog_api::ledger_grpc; +use mc_fog_ledger_enclave::LedgerEnclaveProxy; +use mc_fog_uri::{ConnectionUri, KeyImageStoreUri}; +use mc_ledger_db::LedgerDB; +use mc_sgx_report_cache_untrusted::ReportCacheThread; +use mc_util_grpc::{ + AnonymousAuthenticator, Authenticator, ConnectionUriGrpcioServer, ReadinessIndicator, + TokenAuthenticator, +}; +use mc_watcher::watcher_db::WatcherDB; + +use crate::{ + config::LedgerStoreConfig, counters, db_fetcher::DbFetcher, + sharding_strategy::ShardingStrategy, DbPollSharedState, KeyImageService, +}; + +pub struct KeyImageStoreServer +where + E: LedgerEnclaveProxy, + SS: ShardingStrategy + Send + Sync + 'static, + RC: RaClient + Send + Sync + 'static, +{ + server: grpcio::Server, + client_listen_uri: KeyImageStoreUri, + db_fetcher: DbFetcher, + enclave: E, + ra_client: 
RC, + report_cache_thread: Option, + ias_spid: ProviderId, + logger: Logger, +} + +impl KeyImageStoreServer +where + E: LedgerEnclaveProxy, + SS: ShardingStrategy + Send + Sync + 'static, + RC: RaClient + Send + Sync + 'static, +{ + /// Creates a new key image store server instance + pub fn new_from_config( + config: LedgerStoreConfig, + enclave: E, + ra_client: RC, + ledger: LedgerDB, + watcher: WatcherDB, + sharding_strategy: SS, + time_provider: impl TimeProvider + 'static, + logger: Logger, + ) -> KeyImageStoreServer { + let client_authenticator: Arc = + if let Some(shared_secret) = config.client_auth_token_secret.as_ref() { + Arc::new(TokenAuthenticator::new( + *shared_secret, + config.client_auth_token_max_lifetime, + time_provider, + )) + } else { + Arc::new(AnonymousAuthenticator::default()) + }; + + Self::new( + client_authenticator, + config.client_listen_uri, + enclave, + ra_client, + config.ias_spid, + ledger, + watcher, + sharding_strategy, + logger, + ) + } + + pub fn new( + client_authenticator: Arc, + client_listen_uri: KeyImageStoreUri, + enclave: E, + ra_client: RC, + ias_spid: ProviderId, + ledger: LedgerDB, + watcher: WatcherDB, + sharding_strategy: SS, + logger: Logger, + ) -> KeyImageStoreServer { + let shared_state = Arc::new(Mutex::new(DbPollSharedState::default())); + + let use_tls = client_listen_uri.use_tls(); + let responder_id = client_listen_uri + .responder_id() + .expect("Could not get store responder ID"); + let uri = KeyImageStoreUri::try_from_responder_id(responder_id, use_tls) + .expect("Could not create URI from Responder ID"); + + let key_image_service = KeyImageService::new( + uri, + ledger, + watcher, + enclave.clone(), + shared_state, + client_authenticator, + logger.clone(), + ); + Self::new_from_service( + key_image_service, + client_listen_uri, + enclave, + ra_client, + ias_spid, + sharding_strategy, + logger, + ) + } + + pub fn new_from_service( + mut key_image_service: KeyImageService, + client_listen_uri: KeyImageStoreUri, + enclave: E, + ra_client: RC, + ias_spid: ProviderId, + sharding_strategy: SS, + logger: Logger, + ) -> KeyImageStoreServer { + let readiness_indicator = ReadinessIndicator::default(); + + let env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("key-image-store-server".to_string()) + .build(), + ); + + // Health check service + let health_service = mc_util_grpc::HealthService::new( + Some(readiness_indicator.clone().into()), + logger.clone(), + ) + .into_service(); + + // Build our store server. + // Init ledger store service. 
+ let ledger_store_service = + ledger_grpc::create_key_image_store_api(key_image_service.clone()); + log::debug!(logger, "Constructed Key Image Store GRPC Service"); + + // Package service into grpc server + log::info!( + logger, + "Starting Key Image Store server on {}", + client_listen_uri.addr(), + ); + + let server = grpcio::ServerBuilder::new(env) + .register_service(ledger_store_service) + .register_service(health_service) + .build_using_uri(&client_listen_uri, logger.clone()) + .expect("Could not build Key Image Store Server"); + + let db_fetcher = DbFetcher::new( + key_image_service.get_ledger(), + enclave.clone(), + sharding_strategy, + key_image_service.get_watcher(), + key_image_service.get_db_poll_shared_state(), + readiness_indicator, + logger.clone(), + ); + + Self { + server, + client_listen_uri, + db_fetcher, + enclave, + ra_client, + ias_spid, + report_cache_thread: None, + logger, + } + } + + /// Starts the server + pub fn start(&mut self) { + self.report_cache_thread = Some( + ReportCacheThread::start( + self.enclave.clone(), + self.ra_client.clone(), + self.ias_spid, + &counters::ENCLAVE_REPORT_TIMESTAMP, + self.logger.clone(), + ) + .expect("failed starting report cache thread"), + ); + + self.server.start(); + log::info!( + self.logger, + "API listening on {}", + self.client_listen_uri.addr() + ); + self.db_fetcher.start(); + } + + /// Stops the server + pub fn stop(&mut self) { + block_on(self.server.shutdown()).expect("Could not stop grpc server"); + self.db_fetcher.stop().expect("Could not stop DbFetcher"); + } +} + +impl Drop for KeyImageStoreServer +where + E: LedgerEnclaveProxy, + SS: ShardingStrategy + Send + Sync + 'static, + RC: RaClient + Send + Sync + 'static, +{ + fn drop(&mut self) { + self.stop(); + } +} diff --git a/fog/ledger/server/src/lib.rs b/fog/ledger/server/src/lib.rs index 0eb4c73b8e..132ae5cca4 100644 --- a/fog/ledger/server/src/lib.rs +++ b/fog/ledger/server/src/lib.rs @@ -1,24 +1,46 @@ // Copyright (c) 2018-2022 The MobileCoin Foundation #![allow(clippy::result_large_err)] +pub use block_service::BlockService; +pub use config::{LedgerRouterConfig, LedgerStoreConfig, ShardingStrategy}; +pub use key_image_service::KeyImageService; +pub use key_image_store_server::KeyImageStoreServer; +pub use merkle_proof_service::MerkleProofService; +pub use router_server::LedgerRouterServer; +pub use untrusted_tx_out_service::UntrustedTxOutService; + +pub mod sharding_strategy; + mod block_service; mod config; mod counters; mod db_fetcher; +mod error; mod key_image_service; +mod key_image_store_server; mod merkle_proof_service; -mod server; +mod router_admin_service; +mod router_handlers; +mod router_server; +mod router_service; mod untrusted_tx_out_service; use mc_util_metrics::ServiceMetrics; -pub use block_service::BlockService; -pub use config::LedgerServerConfig; -pub use key_image_service::KeyImageService; -pub use merkle_proof_service::MerkleProofService; -pub use server::LedgerServer; -pub use untrusted_tx_out_service::UntrustedTxOutService; - lazy_static::lazy_static! { pub static ref SVC_COUNTERS: ServiceMetrics = ServiceMetrics::new_and_registered("fog_ledger"); } + +/// State that we want to expose from the db poll thread +#[derive(Debug, Default)] +pub struct DbPollSharedState { + /// The highest block count for which we can guarantee we have loaded all + /// available data. + pub highest_processed_block_count: u64, + + /// The cumulative txo count of the last known block. 
+ pub last_known_block_cumulative_txo_count: u64, + + /// The latest value of `block_version` in the blockchain + pub latest_block_version: u32, +} diff --git a/fog/ledger/server/src/router_admin_service.rs b/fog/ledger/server/src/router_admin_service.rs new file mode 100644 index 0000000000..2e08e42757 --- /dev/null +++ b/fog/ledger/server/src/router_admin_service.rs @@ -0,0 +1,85 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use crate::SVC_COUNTERS; +use grpcio::{ChannelBuilder, RpcContext, RpcStatus, UnarySink}; +use itertools::Itertools; +use mc_common::logger::{log, Logger}; +use mc_fog_api::{ + fog_common::AddShardRequest, + ledger_grpc::{KeyImageStoreApiClient, LedgerRouterAdminApi}, +}; +use mc_fog_uri::KeyImageStoreUri; +use mc_util_grpc::{ + rpc_invalid_arg_error, rpc_logger, rpc_precondition_error, send_result, + ConnectionUriGrpcioChannel, Empty, +}; +use std::{ + collections::HashMap, + str::FromStr, + sync::{Arc, RwLock}, +}; + +#[derive(Clone)] +pub struct LedgerRouterAdminService { + shard_clients: Arc>>>, + logger: Logger, +} + +impl LedgerRouterAdminService { + #[allow(dead_code)] + pub fn new( + shard_clients: Arc>>>, + logger: Logger, + ) -> Self { + Self { + shard_clients, + logger, + } + } + + fn add_shard_impl(&mut self, shard_uri: &str, logger: &Logger) -> Result { + let key_image_store_uri = KeyImageStoreUri::from_str(shard_uri).map_err(|_| { + rpc_invalid_arg_error( + "add_shard", + format!("Shard uri string {shard_uri} is invalid"), + logger, + ) + })?; + let mut shard_clients = self.shard_clients.write().expect("RwLock Poisoned"); + if shard_clients.keys().contains(&key_image_store_uri) { + let error = rpc_precondition_error( + "add_shard", + format!("Shard uri {shard_uri} already exists in the shard list"), + logger, + ); + return Err(error); + } + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("add-shard".to_string()) + .build(), + ); + let key_image_store_client = KeyImageStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env) + .connect_to_uri(&key_image_store_uri, logger), + ); + shard_clients.insert(key_image_store_uri, Arc::new(key_image_store_client)); + + Ok(Empty::new()) + } +} + +impl LedgerRouterAdminApi for LedgerRouterAdminService { + fn add_shard(&mut self, ctx: RpcContext, request: AddShardRequest, sink: UnarySink) { + log::info!(self.logger, "Request received in add_shard fn"); + let _timer = SVC_COUNTERS.req(&ctx); + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + send_result( + ctx, + sink, + self.add_shard_impl(request.get_shard_uri(), logger), + logger, + ); + }); + } +} diff --git a/fog/ledger/server/src/router_handlers.rs b/fog/ledger/server/src/router_handlers.rs new file mode 100644 index 0000000000..e7ce6a2768 --- /dev/null +++ b/fog/ledger/server/src/router_handlers.rs @@ -0,0 +1,392 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use crate::{ + error::{router_server_err_to_rpc_status, RouterServerError}, + SVC_COUNTERS, +}; +use futures::{future::try_join_all, SinkExt, TryStreamExt}; +use grpcio::{ChannelBuilder, DuplexSink, RequestStream, RpcStatus, WriteFlags}; +use mc_attest_api::attest; +use mc_attest_enclave_api::{EnclaveMessage, NonceSession}; +use mc_common::{ + logger::{log, Logger}, + ResponderId, +}; +use mc_fog_api::{ + ledger::{ + LedgerRequest, LedgerRequest_oneof_request_data, LedgerResponse, MultiKeyImageStoreRequest, + MultiKeyImageStoreResponse, MultiKeyImageStoreResponseStatus, + }, + 
ledger_grpc::KeyImageStoreApiClient, +}; +use mc_fog_ledger_enclave::LedgerEnclaveProxy; +use mc_fog_uri::{ConnectionUri, KeyImageStoreUri}; +use mc_util_grpc::{rpc_invalid_arg_error, ConnectionUriGrpcioChannel, ResponseStatus}; +use mc_util_metrics::GrpcMethodName; +use mc_util_telemetry::{create_context, tracer, BoxedTracer, FutureExt, Tracer}; +use std::{collections::BTreeMap, str::FromStr, sync::Arc}; + +/// Handles a series of requests sent by the Fog Ledger Router client, +/// routing them out to shards. +pub async fn handle_requests( + method_name: GrpcMethodName, + shard_clients: Vec>, + enclave: E, + mut requests: RequestStream, + mut responses: DuplexSink, + query_retries: usize, + logger: Logger, +) -> Result<(), grpcio::Error> +where + E: LedgerEnclaveProxy, +{ + while let Some(request) = requests.try_next().await? { + // Per the comment thread on pull request #2976, this should be + // req_impl() and not req(). + // This is so that one call of the original request() method is + // reported per each actual request the client sends. + let _timer = SVC_COUNTERS.req_impl(&method_name); + + let result = handle_request( + request, + shard_clients.clone(), + enclave.clone(), + query_retries, + logger.clone(), + ) + .await; + + let response_status = ResponseStatus::from(&result); + SVC_COUNTERS.resp_impl(&method_name, response_status.is_success); + SVC_COUNTERS.status_code_impl(&method_name, response_status.code); + + match result { + Ok(response) => responses.send((response, WriteFlags::default())).await?, + Err(rpc_status) => return responses.fail(rpc_status).await, + } + } + responses.close().await?; + Ok(()) +} + +/// Handles a client's request by performing either an authentication or a +/// query. +pub async fn handle_request( + request: LedgerRequest, + shard_clients: Vec>, + enclave: E, + query_retries: usize, + logger: Logger, +) -> Result +where + E: LedgerEnclaveProxy, +{ + let tracer = tracer!(); + match request.request_data { + Some(LedgerRequest_oneof_request_data::auth(request)) => { + tracer.in_span("auth", |_cx| handle_auth_request(enclave, request, logger)) + } + Some(LedgerRequest_oneof_request_data::check_key_images(request)) => { + handle_query_request( + request, + enclave, + shard_clients, + query_retries, + logger, + &tracer, + ) + .with_context(create_context(&tracer, "check_key_images")) + .await + } + None => { + let rpc_status = rpc_invalid_arg_error( + "Inavlid LedgerRequest request", + "Neither the check_key_images nor auth fields were set".to_string(), + &logger, + ); + Err(rpc_status) + } + } +} + +/// The result of processing the MultiLedgerStoreQueryResponse from each Fog +/// Ledger Shard. +pub struct ProcessedShardResponseData { + /// gRPC clients for Shards that need to be retried for a successful + /// response. + pub shard_clients_for_retry: Vec>, + + /// Uris for individual Fog Ledger Stores that need to be authenticated with + /// by the Fog Router. It should only have entries if + /// `shard_clients_for_retry` has entries. + pub store_uris_for_authentication: Vec, + + /// New, successfully processed query responses. 
+ pub new_query_responses: Vec<(ResponderId, attest::NonceMessage)>, +} + +impl ProcessedShardResponseData { + pub fn new( + shard_clients_for_retry: Vec>, + store_uris_for_authentication: Vec, + new_query_responses: Vec<(ResponderId, attest::NonceMessage)>, + ) -> Self { + ProcessedShardResponseData { + shard_clients_for_retry, + store_uris_for_authentication, + new_query_responses, + } + } +} + +/// Processes the MultiKeyImageStoreResponses returned by each Ledger Shard. +pub fn process_shard_responses( + clients_and_responses: Vec<(Arc, MultiKeyImageStoreResponse)>, + logger: Logger, +) -> Result { + let mut shard_clients_for_retry = Vec::new(); + let mut store_uris_for_authentication = Vec::new(); + let mut new_query_responses = Vec::new(); + + for (shard_client, mut response) in clients_and_responses { + let store_uri = KeyImageStoreUri::from_str(response.get_store_uri())?; + match response.get_status() { + MultiKeyImageStoreResponseStatus::SUCCESS => { + let store_responder_id = store_uri.host_and_port_responder_id()?; + new_query_responses.push((store_responder_id, response.take_query_response())); + } + MultiKeyImageStoreResponseStatus::AUTHENTICATION_ERROR => { + // We did not receive a query response for this shard.Therefore, we need to: + // (a) retry the query + // (b) authenticate with the Ledger Store that returned the decryption_error + shard_clients_for_retry.push(shard_client); + store_uris_for_authentication.push(store_uri); + } + // This call will be retried as part of the larger retry logic + MultiKeyImageStoreResponseStatus::NOT_READY => (), + // This is an unexpected error - we should never see this + MultiKeyImageStoreResponseStatus::UNKNOWN => { + log::error!( + logger, + "Received a response with status 'UNKNOWN' from store {}", + KeyImageStoreUri::from_str(&response.store_uri)? + ); + } + } + } + + Ok(ProcessedShardResponseData::new( + shard_clients_for_retry, + store_uris_for_authentication, + new_query_responses, + )) +} + +/// Handles a client's authentication request. +pub(crate) fn handle_auth_request( + enclave: E, + auth_message: attest::AuthMessage, + logger: Logger, +) -> Result +where + E: LedgerEnclaveProxy, +{ + let (client_auth_response, _) = enclave.client_accept(auth_message.into()).map_err(|err| { + router_server_err_to_rpc_status("Auth: e client accept", err.into(), logger) + })?; + + let mut response = LedgerResponse::new(); + response.mut_auth().set_data(client_auth_response.into()); + Ok(response) +} + +/// Handles a client's query request. 
+pub(crate) async fn handle_query_request( + query: attest::Message, + enclave: E, + shard_clients: Vec>, + query_retries: usize, + logger: Logger, + tracer: &BoxedTracer, +) -> Result +where + E: LedgerEnclaveProxy, +{ + let mut query_responses: BTreeMap> = BTreeMap::new(); + let mut shards_to_query = shard_clients.clone(); + let sealed_query = enclave + .decrypt_and_seal_query(query.into()) + .map_err(|err| { + router_server_err_to_rpc_status( + "Key Images Query: internal encryption error", + err.into(), + logger.clone(), + ) + })?; + + // The retry logic here is: + // Set retries remaining to query_retries + // Send query and process responses + // If there's a response from every shard, we're done + // If there's a new store, repeat + // If there's no new store and we don't have enough responses, decrement + // remaining_retries and loop + let mut remaining_retries = query_retries; + while remaining_retries > 0 { + let multi_ledger_store_query_request = tracer + .in_span("create_multi_key_image_query", |_cx| { + enclave + .create_multi_key_image_store_query_data(sealed_query.clone()) + .map_err(|err| { + router_server_err_to_rpc_status( + "Key Images Query: internal encryption error", + err.into(), + logger.clone(), + ) + }) + })? + .into(); + let clients_and_responses = + route_query(&multi_ledger_store_query_request, shards_to_query.clone()) + .with_context(create_context( + tracer, + "send_multi_key_image_request_to_shards", + )) + .await + .map_err(|err| { + router_server_err_to_rpc_status( + "Key Images Query: internal query routing error", + err, + logger.clone(), + ) + })?; + + let processed_shard_response_data = + tracer.in_span("process_key_image_shard_responses", |_cx| { + process_shard_responses(clients_and_responses, logger.clone()).map_err(|err| { + router_server_err_to_rpc_status( + "Key Images Query: internal query response processing", + err, + logger.clone(), + ) + }) + })?; + + for (store_responder_id, new_query_response) in processed_shard_response_data + .new_query_responses + .into_iter() + { + query_responses.insert(store_responder_id, new_query_response.into()); + } + + if query_responses.len() >= shard_clients.len() { + break; + } + + shards_to_query = processed_shard_response_data.shard_clients_for_retry; + if !shards_to_query.is_empty() { + authenticate_ledger_stores( + enclave.clone(), + processed_shard_response_data.store_uris_for_authentication, + logger.clone(), + ) + .with_context(create_context(tracer, "authn_key_image_stores")) + .await?; + } else { + remaining_retries -= 1; + } + } + + if remaining_retries == 0 { + return Err(router_server_err_to_rpc_status( + "Key Images Query: timed out connecting to key image stores", + RouterServerError::LedgerStoreError(format!( + "Received {query_retries} responses which failed to advance the MultiKeyImageStoreRequest" + )), + logger.clone(), + )); + } + + let query_response = tracer.in_span("collate_key_image_responses", |_cx| { + enclave + .collate_shard_query_responses(sealed_query, query_responses) + .map_err(|err| { + router_server_err_to_rpc_status( + "Key Images Query: shard response collation error", + RouterServerError::Enclave(err), + logger.clone(), + ) + }) + })?; + + let mut response = LedgerResponse::new(); + response.set_check_key_image_response(query_response.into()); + Ok(response) +} + +/// Sends a client's query request to all of the Fog Ledger shards. 
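// --- Illustrative aside (editor's sketch, not part of the patch) ---
// The retry policy described in the comment above, reduced to a synchronous,
// self-contained sketch. query_all() and authenticate() are placeholders for the
// attested gRPC fan-out and Auth round trips, and none of the names here are the real
// API; only the loop shape is meant to mirror handle_query_request(): retry until every
// shard has answered, re-authenticate stores that could not decrypt, and burn a retry
// only when no forward progress is possible.
use std::collections::BTreeMap;

struct Outcome {
    answered: Vec<(String, String)>, // (shard id, response)
    needs_auth: Vec<String>,         // shards that returned an authentication error
}

fn query_all(shards: &[String], authed: &BTreeMap<String, bool>) -> Outcome {
    let mut outcome = Outcome { answered: Vec::new(), needs_auth: Vec::new() };
    for shard in shards {
        if authed.get(shard).copied().unwrap_or(false) {
            outcome.answered.push((shard.clone(), format!("key-image-result-from-{shard}")));
        } else {
            outcome.needs_auth.push(shard.clone());
        }
    }
    outcome
}

fn authenticate(shard: &str, authed: &mut BTreeMap<String, bool>) {
    // Placeholder for the attested Auth round trip with the store.
    authed.insert(shard.to_owned(), true);
}

fn run_query(
    all_shards: &[String],
    query_retries: usize,
    authed: &mut BTreeMap<String, bool>,
) -> Result<BTreeMap<String, String>, String> {
    let mut responses = BTreeMap::new();
    let mut shards_to_query: Vec<String> = all_shards.to_vec();
    let mut remaining_retries = query_retries;

    while remaining_retries > 0 {
        let outcome = query_all(&shards_to_query, authed);
        responses.extend(outcome.answered);
        if responses.len() >= all_shards.len() {
            break; // every shard has answered
        }
        // Re-authenticate stores that could not decrypt; only burn a retry when there
        // is no store left to re-authenticate, i.e. no forward progress was made.
        shards_to_query = outcome.needs_auth;
        if shards_to_query.is_empty() {
            remaining_retries -= 1;
        } else {
            for shard in &shards_to_query {
                authenticate(shard, authed);
            }
        }
    }

    if remaining_retries == 0 {
        return Err(format!("no forward progress after {query_retries} attempts"));
    }
    Ok(responses)
}

fn main() {
    let shards = vec!["shard-0".to_owned(), "shard-1".to_owned()];
    // shard-1 has not been authenticated yet, so the first pass yields one response.
    let mut authed = BTreeMap::from([("shard-0".to_owned(), true), ("shard-1".to_owned(), false)]);
    let responses = run_query(&shards, 3, &mut authed).expect("should succeed after re-auth");
    assert_eq!(responses.len(), 2);
    println!("collected {} shard responses", responses.len());
}
// --- end of aside ---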
+async fn route_query( + request: &MultiKeyImageStoreRequest, + shard_clients: Vec>, +) -> Result, MultiKeyImageStoreResponse)>, RouterServerError> { + let responses = shard_clients + .into_iter() + .map(|shard_client| query_shard(request, shard_client)); + try_join_all(responses).await +} + +/// Sends a client's query request to one of the Fog Ledger shards. +async fn query_shard( + request: &MultiKeyImageStoreRequest, + shard_client: Arc, +) -> Result<(Arc, MultiKeyImageStoreResponse), RouterServerError> { + let client_unary_receiver = shard_client.multi_key_image_store_query_async(request)?; + let response = client_unary_receiver.await?; + + Ok((shard_client, response)) +} + +// Authenticates Fog Ledger Stores that have previously not been authenticated. +async fn authenticate_ledger_stores( + enclave: E, + ledger_store_uris: Vec, + logger: Logger, +) -> Result, RpcStatus> { + let pending_auth_requests = ledger_store_uris + .into_iter() + .map(|store_uri| authenticate_ledger_store(enclave.clone(), store_uri, logger.clone())); + + try_join_all(pending_auth_requests).await.map_err(|err| { + router_server_err_to_rpc_status( + "Key Images Query: cannot authenticate with each Fog Ledger Store:", + err, + logger.clone(), + ) + }) +} + +// Authenticates a Fog Ledger Store that has previously not been authenticated. +async fn authenticate_ledger_store( + enclave: E, + ledger_store_url: KeyImageStoreUri, + logger: Logger, +) -> Result<(), RouterServerError> { + let ledger_store_id = ledger_store_url.responder_id()?; + let client_auth_request = enclave.ledger_store_init(ledger_store_id.clone())?; + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("authenticate-ledger-store".to_string()) + .build(), + ); + let ledger_store_client = KeyImageStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env) + .connect_to_uri(&ledger_store_url, &logger), + ); + + let auth_unary_receiver = ledger_store_client.auth_async(&client_auth_request.into())?; + let auth_response = auth_unary_receiver.await?; + + enclave + .ledger_store_connect(ledger_store_id, auth_response.into()) + .map_err(|e| e.into()) +} diff --git a/fog/ledger/server/src/router_server.rs b/fog/ledger/server/src/router_server.rs new file mode 100644 index 0000000000..adcf42576c --- /dev/null +++ b/fog/ledger/server/src/router_server.rs @@ -0,0 +1,221 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +use futures::executor::block_on; +use grpcio::ChannelBuilder; +use mc_attest_net::RaClient; +use mc_common::{ + logger::{log, Logger}, + time::SystemTimeProvider, +}; +use mc_fog_api::ledger_grpc; +use mc_fog_ledger_enclave::LedgerEnclaveProxy; +use mc_fog_uri::{ConnectionUri, FogLedgerUri}; +use mc_ledger_db::LedgerDB; +use mc_sgx_report_cache_untrusted::ReportCacheThread; +use mc_util_grpc::{ + AnonymousAuthenticator, Authenticator, ConnectionUriGrpcioChannel, ConnectionUriGrpcioServer, + TokenAuthenticator, +}; +use mc_util_uri::AdminUri; +use mc_watcher::watcher_db::WatcherDB; + +use crate::{ + config::LedgerRouterConfig, counters, router_admin_service::LedgerRouterAdminService, + router_service::LedgerRouterService, BlockService, MerkleProofService, UntrustedTxOutService, +}; + +pub struct LedgerRouterServer +where + E: LedgerEnclaveProxy, + RC: RaClient + Send + Sync + 'static, +{ + router_server: grpcio::Server, + admin_server: grpcio::Server, + client_listen_uri: FogLedgerUri, + admin_listen_uri: AdminUri, + config: LedgerRouterConfig, + 
enclave: E, + ra_client: RC, + report_cache_thread: Option, + logger: Logger, +} + +impl LedgerRouterServer +where + E: LedgerEnclaveProxy, + RC: RaClient + Send + Sync + 'static, +{ + pub fn new( + config: LedgerRouterConfig, + enclave: E, + ra_client: RC, + ledger: LedgerDB, + watcher: WatcherDB, + logger: Logger, + ) -> LedgerRouterServer { + let mut ledger_store_grpc_clients = HashMap::new(); + let grpc_env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("Main-RPC".to_string()) + .build(), + ); + for shard_uri in config.shard_uris.clone() { + let ledger_store_grpc_client = ledger_grpc::KeyImageStoreApiClient::new( + ChannelBuilder::default_channel_builder(grpc_env.clone()) + .connect_to_uri(&shard_uri, &logger), + ); + ledger_store_grpc_clients.insert(shard_uri, Arc::new(ledger_store_grpc_client)); + } + let ledger_store_grpc_clients = Arc::new(RwLock::new(ledger_store_grpc_clients)); + + let client_authenticator: Arc = + if let Some(shared_secret) = config.client_auth_token_secret.as_ref() { + Arc::new(TokenAuthenticator::new( + *shared_secret, + config.client_auth_token_max_lifetime, + SystemTimeProvider::default(), + )) + } else { + Arc::new(AnonymousAuthenticator::default()) + }; + + let env = Arc::new( + grpcio::EnvBuilder::new() + .name_prefix("ledger-router-server".to_string()) + .build(), + ); + + // Health check service - will be used in both router + admin interface + let health_service = mc_util_grpc::HealthService::new(None, logger.clone()).into_service(); + + // Build our router server. + // Init ledger router service. + let ledger_service = LedgerRouterService::new( + enclave.clone(), + ledger_store_grpc_clients.clone(), + config.query_retries, + logger.clone(), + ); + + let ledger_router_service = ledger_grpc::create_ledger_api(ledger_service.clone()); + log::debug!(logger, "Constructed Ledger Router GRPC Service"); + + let unary_key_image_service = ledger_grpc::create_fog_key_image_api(ledger_service); + + // Init ledger router admin service. 
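// --- Illustrative aside (editor's sketch, not part of the patch) ---
// The shared shard list that the router service and the admin service both hold,
// reduced to a self-contained sketch. StoreClient and plain strings stand in for the
// generated KeyImageStoreApiClient and KeyImageStoreUri (the URI strings below are
// arbitrary placeholders); only the Arc<RwLock<HashMap<..>>> sharing and the duplicate
// check mirror LedgerRouterAdminService::add_shard_impl().
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};

/// Placeholder for a per-shard gRPC client.
struct StoreClient {
    uri: String,
}

type SharedShards = Arc<RwLock<HashMap<String, Arc<StoreClient>>>>;

/// Mirrors the AddShard flow: reject duplicates, otherwise "connect" and insert.
fn add_shard(shards: &SharedShards, shard_uri: &str) -> Result<(), String> {
    let mut shards = shards.write().expect("RwLock poisoned");
    if shards.contains_key(shard_uri) {
        return Err(format!("Shard uri {shard_uri} already exists in the shard list"));
    }
    let client = Arc::new(StoreClient { uri: shard_uri.to_owned() });
    shards.insert(shard_uri.to_owned(), client);
    Ok(())
}

fn main() {
    let shards: SharedShards = Arc::new(RwLock::new(HashMap::new()));

    // The router service clones this handle and reads it on every client request...
    let router_view = shards.clone();

    // ...while the admin service mutates it when AddShard is called at runtime.
    add_shard(&shards, "key-image-store-0.fog.test:3225").expect("first insert succeeds");
    assert!(add_shard(&shards, "key-image-store-0.fog.test:3225").is_err());

    let known: Vec<String> = router_view
        .read()
        .expect("RwLock poisoned")
        .values()
        .map(|client| client.uri.clone())
        .collect();
    println!("router now queries {} shard(s): {known:?}", known.len());
}
// --- end of aside ---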
+ let ledger_router_admin_service = ledger_grpc::create_ledger_router_admin_api( + LedgerRouterAdminService::new(ledger_store_grpc_clients, logger.clone()), + ); + log::debug!(logger, "Constructed Ledger Router Admin GRPC Service"); + + // Non-routed servers and services + // Init merkle proof service + let merkle_proof_service = + ledger_grpc::create_fog_merkle_proof_api(MerkleProofService::new( + config.chain_id.clone(), + ledger.clone(), + enclave.clone(), + client_authenticator.clone(), + logger.clone(), + )); + // Init untrusted tx out service + let untrusted_tx_out_service = + ledger_grpc::create_fog_untrusted_tx_out_api(UntrustedTxOutService::new( + config.chain_id.clone(), + ledger.clone(), + watcher.clone(), + client_authenticator.clone(), + logger.clone(), + )); + // Init block service + let block_service = ledger_grpc::create_fog_block_api(BlockService::new( + config.chain_id.clone(), + ledger, + watcher, + client_authenticator, + logger.clone(), + )); + + // Package service into grpc server + log::info!( + logger, + "Starting Ledger Router server on {}", + config.client_listen_uri.addr(), + ); + + let router_server = grpcio::ServerBuilder::new(env.clone()) + .register_service(ledger_router_service) + .register_service(unary_key_image_service) + .register_service(merkle_proof_service) + .register_service(untrusted_tx_out_service) + .register_service(block_service) + .register_service(health_service) + .build_using_uri(&config.client_listen_uri, logger.clone()) + .expect("Could not build Ledger Router Server"); + let admin_server = grpcio::ServerBuilder::new(env) + .register_service(ledger_router_admin_service) + .build_using_uri(&config.admin_listen_uri, logger.clone()) + .expect("Could not build Ledger Router Admin Server"); + + Self { + router_server, + admin_server, + client_listen_uri: config.client_listen_uri.clone(), + admin_listen_uri: config.admin_listen_uri.clone(), + config, + enclave, + ra_client, + report_cache_thread: None, + logger, + } + } + + /// Starts the server + pub fn start(&mut self) { + self.report_cache_thread = Some( + ReportCacheThread::start( + self.enclave.clone(), + self.ra_client.clone(), + self.config.ias_spid, + &counters::ENCLAVE_REPORT_TIMESTAMP, + self.logger.clone(), + ) + .expect("failed starting report cache thread"), + ); + + self.router_server.start(); + log::info!( + self.logger, + "Router API listening on {}", + self.client_listen_uri.addr() + ); + + self.admin_server.start(); + log::info!( + self.logger, + "Router Admin API listening on {}", + self.admin_listen_uri.addr() + ); + } + + /// Stops the server + pub fn stop(&mut self) { + block_on(self.router_server.shutdown()).expect("Could not stop router grpc server"); + block_on(self.admin_server.shutdown()).expect("Could not stop router admin grpc server"); + } +} + +impl Drop for LedgerRouterServer +where + E: LedgerEnclaveProxy, + RC: RaClient + Send + Sync + 'static, +{ + fn drop(&mut self) { + self.stop(); + } +} diff --git a/fog/ledger/server/src/router_service.rs b/fog/ledger/server/src/router_service.rs new file mode 100644 index 0000000000..1d364c69bc --- /dev/null +++ b/fog/ledger/server/src/router_service.rs @@ -0,0 +1,186 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +use crate::{ + router_handlers::{self, handle_auth_request, handle_query_request}, + SVC_COUNTERS, +}; +use futures::{FutureExt, TryFutureExt}; +use grpcio::{DuplexSink, RequestStream, RpcContext, UnarySink}; +use mc_attest_api::attest::{AuthMessage, Message}; +use mc_common::logger::{log, Logger}; 
+use mc_fog_api::{ + ledger::{LedgerRequest, LedgerResponse}, + ledger_grpc::{self, FogKeyImageApi, KeyImageStoreApiClient, LedgerApi}, +}; +use mc_fog_ledger_enclave::LedgerEnclaveProxy; +use mc_fog_uri::KeyImageStoreUri; +use mc_util_grpc::{rpc_internal_error, rpc_logger}; +use mc_util_metrics::ServiceMetrics; +use mc_util_telemetry::tracer; + +use std::{ + collections::HashMap, + sync::{Arc, RwLock}, +}; + +#[derive(Clone)] +pub struct LedgerRouterService +where + E: LedgerEnclaveProxy, +{ + enclave: E, + shards: Arc>>>, + query_retries: usize, + logger: Logger, +} + +impl LedgerRouterService { + /// Creates a new LedgerRouterService that can be used by a gRPC server to + /// fulfill gRPC requests. + pub fn new( + enclave: E, + shards: Arc>>>, + query_retries: usize, + logger: Logger, + ) -> Self { + Self { + enclave, + shards, + query_retries, + logger, + } + } +} + +impl LedgerApi for LedgerRouterService +where + E: LedgerEnclaveProxy, +{ + fn request( + &mut self, + ctx: RpcContext, + requests: RequestStream, + responses: DuplexSink, + ) { + let _timer = SVC_COUNTERS.req(&ctx); + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + log::warn!( + self.logger, + "Streaming GRPC Ledger API only partially implemented." + ); + let logger = logger.clone(); + + let shards = self.shards.read().expect("RwLock poisoned"); + let method_name = ServiceMetrics::get_method_name(&ctx); + + let future = router_handlers::handle_requests( + method_name, + shards.values().cloned().collect(), + self.enclave.clone(), + requests, + responses, + self.query_retries, + logger.clone(), + ) + .map_err(move |err| log::error!(&logger, "failed to reply: {}", err)) + // TODO: Do more with the error than just push it to the log. + .map(|_| ()); + + ctx.spawn(future) + }); + } +} + +/// Used for the implementation of FogKeyImageApi::check_key_images(), +/// the legacy unary key-image API, for LedgerRouterService. +async fn unary_check_key_image_impl( + request: Message, + query_retries: usize, + enclave: E, + sink: UnarySink, + shard_clients: Vec>, + scope_logger: Logger, +) -> Result<(), grpcio::Error> +where + E: LedgerEnclaveProxy, +{ + let tracer = tracer!(); + let result = handle_query_request( + request, + enclave, + shard_clients, + query_retries, + scope_logger.clone(), + &tracer, + ) + .await; + + match result { + Ok(mut response) => { + if response.has_check_key_image_response() { + sink.success(response.take_check_key_image_response()).await + } else { + let error = rpc_internal_error( + "Inavlid LedgerRequest response", + "Cannot provide a check key image response to the client's key image request." + .to_string(), + &scope_logger, + ); + sink.fail(error).await + } + } + Err(rpc_status) => sink.fail(rpc_status).await, + } +} + +// This API is the unary key-image-specific equivalent of LedgerApi. +impl FogKeyImageApi for LedgerRouterService { + fn check_key_images(&mut self, ctx: RpcContext, request: Message, sink: UnarySink) { + let _timer = SVC_COUNTERS.req(&ctx); + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + let logger = logger.clone(); + let shards = self.shards.read().expect("RwLock poisoned"); + + let future = unary_check_key_image_impl( + request, + self.query_retries, + self.enclave.clone(), + sink, + shards.values().cloned().collect(), + logger.clone(), + ) + .map_err(move |err| log::error!(&logger, "failed to reply: {}", err)) + // TODO: Do more with the error than just push it to the log. 
+ .map(|_| ()); + + ctx.spawn(future); + }) + } + + fn auth(&mut self, ctx: RpcContext, request: AuthMessage, sink: UnarySink) { + let _timer = SVC_COUNTERS.req(&ctx); + mc_common::logger::scoped_global_logger(&rpc_logger(&ctx, &self.logger), |logger| { + let logger = logger.clone(); + let result = handle_auth_request(self.enclave.clone(), request, logger.clone()); + let future = match result { + Ok(mut response) => { + if response.has_auth() { + sink.success(response.take_auth()) + } else { + let error = rpc_internal_error( + "Inavlid LedgerRequest response", + "Response to client's auth request did not contain an auth response." + .to_string(), + &logger, + ); + sink.fail(error) + } + } + Err(rpc_status) => sink.fail(rpc_status), + } + .map_err(move |err| log::error!(&logger, "failed to reply: {}", err)) + .map(|_| ()); + ctx.spawn(future); + }); + } +} diff --git a/fog/ledger/server/src/server.rs b/fog/ledger/server/src/server.rs deleted file mode 100644 index 4678ae5ac5..0000000000 --- a/fog/ledger/server/src/server.rs +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation - -use crate::{ - config::LedgerServerConfig, counters, db_fetcher::DbFetcher, BlockService, KeyImageService, - MerkleProofService, UntrustedTxOutService, -}; -use displaydoc::Display; -use futures::executor::block_on; -use grpcio::Error as GrpcError; -use mc_attest_net::RaClient; -use mc_common::{ - logger::{log, Logger}, - time::TimeProvider, -}; -use mc_fog_api::ledger_grpc; -use mc_fog_ledger_enclave::{Error as EnclaveError, LedgerEnclaveProxy}; -use mc_ledger_db::LedgerDB; -use mc_sgx_report_cache_untrusted::{Error as ReportCacheError, ReportCacheThread}; -use mc_util_encodings::Error as EncodingError; -use mc_util_grpc::{ - AnonymousAuthenticator, Authenticator, ConnectionUriGrpcioServer, ReadinessIndicator, - TokenAuthenticator, -}; -use mc_util_uri::ConnectionUri; -use mc_watcher::watcher_db::WatcherDB; -use std::sync::{Arc, Mutex}; - -#[derive(Debug, Display)] -pub enum LedgerServerError { - /// Ledger enclave error: {0} - Enclave(EnclaveError), - /// Failed to join thread: {0} - ThreadJoin(String), - /// RPC shutdown failure: {0} - RpcShutdown(String), - /// Attest convert error: {0} - Encoding(EncodingError), - /// Report cache error: {0} - ReportCache(ReportCacheError), - /// GRPC Error: {0} - Grpc(GrpcError), -} - -impl From for LedgerServerError { - fn from(src: EnclaveError) -> Self { - LedgerServerError::Enclave(src) - } -} - -impl From for LedgerServerError { - fn from(src: EncodingError) -> Self { - LedgerServerError::Encoding(src) - } -} - -impl From for LedgerServerError { - fn from(src: ReportCacheError) -> Self { - Self::ReportCache(src) - } -} - -impl From for LedgerServerError { - fn from(src: GrpcError) -> Self { - Self::Grpc(src) - } -} - -pub struct LedgerServer { - config: LedgerServerConfig, - server: Option, - key_image_service: KeyImageService, - merkle_proof_service: MerkleProofService, - block_service: BlockService, - untrusted_tx_out_service: UntrustedTxOutService, - enclave: E, - ra_client: R, - report_cache_thread: Option, - db_fetcher: Option, - logger: Logger, -} - -impl LedgerServer { - pub fn new( - config: LedgerServerConfig, - enclave: E, - ledger: LedgerDB, - watcher: WatcherDB, - ra_client: R, - time_provider: impl TimeProvider + 'static, - logger: Logger, - ) -> Self { - let client_authenticator: Arc = - if let Some(shared_secret) = config.client_auth_token_secret.as_ref() { - Arc::new(TokenAuthenticator::new( - *shared_secret, - 
config.client_auth_token_max_lifetime, - time_provider, - )) - } else { - Arc::new(AnonymousAuthenticator::default()) - }; - - let shared_state = Arc::new(Mutex::new(DbPollSharedState::default())); - - let key_image_service = KeyImageService::new( - config.chain_id.clone(), - ledger.clone(), - watcher.clone(), - enclave.clone(), - shared_state, - client_authenticator.clone(), - logger.clone(), - ); - let merkle_proof_service = MerkleProofService::new( - config.chain_id.clone(), - ledger.clone(), - enclave.clone(), - client_authenticator.clone(), - logger.clone(), - ); - let block_service = BlockService::new( - config.chain_id.clone(), - ledger.clone(), - watcher.clone(), - client_authenticator.clone(), - logger.clone(), - ); - let untrusted_tx_out_service = UntrustedTxOutService::new( - config.chain_id.clone(), - ledger, - watcher, - client_authenticator, - logger.clone(), - ); - - Self { - config, - server: None, - key_image_service, - merkle_proof_service, - block_service, - untrusted_tx_out_service, - enclave, - ra_client, - report_cache_thread: None, - db_fetcher: None, - logger, - } - } - - pub fn start(&mut self) -> Result<(), LedgerServerError> { - let ret = { - let readiness_indicator = ReadinessIndicator::default(); - - self.report_cache_thread = Some(ReportCacheThread::start( - self.enclave.clone(), - self.ra_client.clone(), - self.config.ias_spid, - &counters::ENCLAVE_REPORT_TIMESTAMP, - self.logger.clone(), - )?); - - self.db_fetcher = Some(DbFetcher::new( - self.key_image_service.get_ledger(), - self.enclave.clone(), - self.key_image_service.get_watcher(), - self.key_image_service.get_db_poll_shared_state(), - readiness_indicator.clone(), - self.logger.clone(), - )); - - let env = Arc::new( - grpcio::EnvBuilder::new() - .name_prefix("LedgerServer-RPC".to_string()) - .build(), - ); - - // Package endpoints into grpc service - let key_image_service = - ledger_grpc::create_fog_key_image_api(self.key_image_service.clone()); - let merkle_proof_service = - ledger_grpc::create_fog_merkle_proof_api(self.merkle_proof_service.clone()); - let block_service = ledger_grpc::create_fog_block_api(self.block_service.clone()); - let untrusted_tx_out_service = - ledger_grpc::create_fog_untrusted_tx_out_api(self.untrusted_tx_out_service.clone()); - - // Health check service - let health_service = mc_util_grpc::HealthService::new( - Some(readiness_indicator.into()), - self.logger.clone(), - ) - .into_service(); - - // Package service into grpc server - log::info!( - self.logger, - "Starting Ledger server on {}", - self.config.client_listen_uri.addr(), - ); - let server_builder = grpcio::ServerBuilder::new(env) - .register_service(key_image_service) - .register_service(merkle_proof_service) - .register_service(block_service) - .register_service(untrusted_tx_out_service) - .register_service(health_service); - - let mut server = server_builder - .build_using_uri(&self.config.client_listen_uri, self.logger.clone())?; - server.start(); - - self.server = Some(server); - - // Success. 
- Ok(()) - }; - if ret.is_err() { - self.stop(); - } - ret - } - - pub fn stop(&mut self) { - if let Some(ref mut server) = self.server { - block_on(server.shutdown()).expect("Could not stop grpc server"); - } - - if let Some(ref mut report_cache_thread) = self.report_cache_thread.take() { - report_cache_thread - .stop() - .expect("Could not stop report cache thread"); - } - - if let Some(ref mut db_fetcher) = self.db_fetcher.take() { - db_fetcher.stop().expect("Could not stop db fetcher"); - } - } -} - -impl Drop for LedgerServer { - fn drop(&mut self) { - self.stop(); - } -} - -/// State that we want to expose from the db poll thread -#[derive(Debug, Default)] -pub struct DbPollSharedState { - /// The highest block count for which we can guarantee we have loaded all - /// available data. - pub highest_processed_block_count: u64, - - /// The cumulative txo count of the last known block. - pub last_known_block_cumulative_txo_count: u64, - - /// The latest value of `block_version` in the blockchain - pub latest_block_version: u32, -} diff --git a/fog/ledger/server/src/sharding_strategy.rs b/fog/ledger/server/src/sharding_strategy.rs new file mode 100644 index 0000000000..a6fbab7497 --- /dev/null +++ b/fog/ledger/server/src/sharding_strategy.rs @@ -0,0 +1,245 @@ +// Copyright (c) 2018-2022 The MobileCoin Foundation + +//! Enables a Key Image Store to know for which blocks to process key images. +//! +//! By determining which key images to process, we are able to "shard" the set +//! of key images across Key Image Store instances. + +use mc_blockchain_types::BlockIndex; +use mc_fog_types::{common::BlockRange, BlockCount}; +use serde::Serialize; +use std::str::FromStr; + +/// Tells a Key Image Store for which blocks it should process key images. +pub trait ShardingStrategy { + /// Returns true if the Key Image Store should process this block. + fn should_process_block(&self, block_index: BlockIndex) -> bool; + + /// Returns true if the Key Image Store is ready to serve key images to the + /// client. + /// + /// Different sharding strategies might be ready to serve key images when + /// different conditions have been met. + fn is_ready(&self, processed_block_count: BlockCount) -> bool; + + /// Returns the block range that this sharding strategy is responsible for. + fn get_block_range(&self) -> BlockRange; +} + +/// Determines whether or not to process a block's key images based on the +/// "epoch" sharding strategy, in which a block is processed IFF it falls within +/// the contiguous range of blocks. +/// +/// In practice, the set of Key Image Shards will contain overlapping +/// [epoch_block_ranges] in order to obfuscate which shard processed the key +/// images. +#[derive(Clone, Serialize)] +pub struct EpochShardingStrategy { + /// If a block falls within this range, then the Key Image Store should + /// process its key images. 
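+ ///
+ /// For example, a store configured with `BlockRange::new(50, 100)` processes
+ /// key images for blocks 50 through 99 (the end block is exclusive) and, per
+ /// `is_ready`, reports itself ready once its processed block count reaches at
+ /// least half the range length (25 blocks here); a range starting at block 0
+ /// is always considered ready.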
+ epoch_block_range: BlockRange, +} + +impl ShardingStrategy for EpochShardingStrategy { + fn should_process_block(&self, block_index: BlockIndex) -> bool { + self.epoch_block_range.contains(block_index) + } + + fn is_ready(&self, processed_block_count: BlockCount) -> bool { + self.have_enough_blocks_been_processed(processed_block_count) + } + + fn get_block_range(&self) -> BlockRange { + self.epoch_block_range.clone() + } +} + +impl Default for EpochShardingStrategy { + fn default() -> Self { + Self { + epoch_block_range: BlockRange::new(0, u64::MAX), + } + } +} + +impl EpochShardingStrategy { + pub fn new(epoch_block_range: BlockRange) -> Self { + Self { epoch_block_range } + } + + fn have_enough_blocks_been_processed(&self, processed_block_count: BlockCount) -> bool { + if self.is_first_epoch() { + return true; + } + + let epoch_block_range_length = + self.epoch_block_range.end_block - self.epoch_block_range.start_block; + let minimum_processed_block_count = epoch_block_range_length / 2; + + u64::from(processed_block_count) >= minimum_processed_block_count + } + + fn is_first_epoch(&self) -> bool { + self.epoch_block_range.start_block == 0 + } +} + +impl FromStr for EpochShardingStrategy { + type Err = String; + + fn from_str(s: &str) -> Result { + if let Ok(block_range) = BlockRange::from_str(s) { + return Ok(Self::new(block_range)); + } + + Err("Invalid epoch sharding strategy.".to_string()) + } +} + +#[cfg(test)] +mod epoch_sharding_strategy_tests { + use super::*; + + #[test] + fn should_process_block_block_index_is_before_epoch_start_returns_false() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = epoch_sharding_strategy.should_process_block(START_BLOCK - 1); + + assert!(!should_process_block) + } + + #[test] + fn should_process_block_block_index_is_epoch_start_returns_true() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = epoch_sharding_strategy.should_process_block(START_BLOCK); + + assert!(should_process_block) + } + + #[test] + fn should_process_block_block_index_is_in_epoch_block_range_returns_true() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let included_block_index = ((END_BLOCK_EXCLUSIVE - START_BLOCK) / 2) + START_BLOCK; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = + epoch_sharding_strategy.should_process_block(included_block_index); + + assert!(should_process_block) + } + + #[test] + fn should_process_block_block_index_is_one_before_epoch_end_block_range_returns_true() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + let should_process_block = + epoch_sharding_strategy.should_process_block(END_BLOCK_EXCLUSIVE - 1); + assert!(should_process_block) + } + + #[test] + fn should_process_block_block_index_is_epoch_end_block_range_returns_false() { + const 
START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = + epoch_sharding_strategy.should_process_block(END_BLOCK_EXCLUSIVE); + + assert!(!should_process_block) + } + + #[test] + fn should_process_block_block_index_is_after_epoch_end_block_range_returns_false() { + const START_BLOCK: BlockIndex = 50; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let should_process_block = + epoch_sharding_strategy.should_process_block(END_BLOCK_EXCLUSIVE + 1); + + assert!(!should_process_block) + } + + #[test] + fn is_ready_allows_0_in_0_to_100_shard() { + // The first epoch has a start block == 0. + const START_BLOCK: BlockIndex = 0; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready = epoch_sharding_strategy.is_ready(0.into()); + + assert!(is_ready) + } + + #[test] + fn is_ready_to_serve_allows_70_in_0_to_100_shard() { + // The first epoch has a start block == 0. + const START_BLOCK: BlockIndex = 0; + const END_BLOCK_EXCLUSIVE: BlockIndex = 100; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready = epoch_sharding_strategy.is_ready(70.into()); + + assert!(is_ready) + } + + #[test] + fn is_ready_not_first_shard_prevents_less_than_minimum() { + const START_BLOCK: BlockIndex = 100; + const END_BLOCK_EXCLUSIVE: BlockIndex = 111; + let epoch_block_range_length = END_BLOCK_EXCLUSIVE - START_BLOCK; + let minimum_processed_block_count = epoch_block_range_length / 2; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready = epoch_sharding_strategy.is_ready((minimum_processed_block_count - 1).into()); + + assert!(!is_ready) + } + + #[test] + fn is_ready_not_first_shard_allows_minimum() { + const START_BLOCK: BlockIndex = 100; + const END_BLOCK_EXCLUSIVE: BlockIndex = 111; + let epoch_block_range_length = END_BLOCK_EXCLUSIVE - START_BLOCK; + let minimum_processed_block_count = epoch_block_range_length / 2; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready = epoch_sharding_strategy.is_ready(minimum_processed_block_count.into()); + + assert!(is_ready) + } + + #[test] + fn is_ready_not_first_shard_allows_over_minimum() { + const START_BLOCK: BlockIndex = 100; + const END_BLOCK_EXCLUSIVE: BlockIndex = 110; + let epoch_block_range_length = END_BLOCK_EXCLUSIVE - START_BLOCK; + let minimum_processed_block_count = epoch_block_range_length / 2; + let epoch_block_range = BlockRange::new(START_BLOCK, END_BLOCK_EXCLUSIVE); + let epoch_sharding_strategy = EpochShardingStrategy::new(epoch_block_range); + + let is_ready = epoch_sharding_strategy.is_ready((minimum_processed_block_count + 1).into()); + + assert!(is_ready) + } +} diff --git a/fog/ledger/server/tests/connection.rs b/fog/ledger/server/tests/router_connection.rs similarity index 60% rename 
from fog/ledger/server/tests/connection.rs rename to fog/ledger/server/tests/router_connection.rs index b1066ec98c..dfea289f92 100644 --- a/fog/ledger/server/tests/connection.rs +++ b/fog/ledger/server/tests/router_connection.rs @@ -1,8 +1,9 @@ -// Copyright (c) 2018-2022 The MobileCoin Foundation +// Copyright (c) 2018-2023 The MobileCoin Foundation //! Integration tests at the level of the fog ledger connection / fog ledger //! grpc API +use futures::executor::block_on; use mc_account_keys::{AccountKey, PublicAddress}; use mc_api::watcher::TimestampResultCode; use mc_attest_net::{Client as AttestClient, RaClient}; @@ -11,18 +12,20 @@ use mc_blockchain_types::{BlockSignature, BlockVersion}; use mc_common::{ logger::{test_with_logger, Logger}, time::SystemTimeProvider, - ResponderId, }; use mc_crypto_keys::{CompressedRistrettoPublic, Ed25519Pair}; use mc_fog_api::ledger::TxOutResultCode; use mc_fog_ledger_connection::{ Error, FogKeyImageGrpcClient, FogMerkleProofGrpcClient, FogUntrustedLedgerGrpcClient, - KeyImageResultExtension, OutputResultExtension, + KeyImageResultExtension, LedgerGrpcClient, OutputResultExtension, }; use mc_fog_ledger_enclave::LedgerSgxEnclave; -use mc_fog_ledger_server::{LedgerServer, LedgerServerConfig}; +use mc_fog_ledger_server::{ + sharding_strategy::EpochShardingStrategy, KeyImageStoreServer, LedgerRouterConfig, + LedgerRouterServer, LedgerStoreConfig, ShardingStrategy, +}; use mc_fog_test_infra::get_enclave_path; -use mc_fog_uri::{ConnectionUri, FogLedgerUri}; +use mc_fog_uri::{ConnectionUri, FogLedgerUri, KeyImageStoreUri}; use mc_ledger_db::{test_utils::recreate_ledger_db, Ledger, LedgerDB}; use mc_transaction_core::{ membership_proofs::compute_implied_merkle_root, ring_signature::KeyImage, tokens::Mob, Amount, @@ -31,6 +34,7 @@ use mc_transaction_core::{ use mc_util_from_random::FromRandom; use mc_util_grpc::{GrpcRetryConfig, CHAIN_ID_MISMATCH_ERR_MSG}; use mc_util_test_helper::{CryptoRng, RngCore, RngType, SeedableRng}; +use mc_util_uri::AdminUri; use mc_watcher::watcher_db::WatcherDB; use std::{path::PathBuf, str::FromStr, sync::Arc, thread::sleep, time::Duration}; use tempfile::TempDir; @@ -59,8 +63,6 @@ fn setup_watcher_db(logger: Logger) -> (WatcherDB, PathBuf) { // hitting a fog ledger server #[test_with_logger] fn fog_ledger_merkle_proofs_test(logger: Logger) { - let base_port = 3230; - let mut rng = RngType::from_seed([0u8; 32]); for block_version in BlockVersion::iterator() { @@ -109,22 +111,31 @@ fn fog_ledger_merkle_proofs_test(logger: Logger) { { // Make LedgerServer - let client_uri = FogLedgerUri::from_str(&format!( + let client_listen_uri = FogLedgerUri::from_str(&format!( "insecure-fog-ledger://127.0.0.1:{}", - base_port + 7 + portpicker::pick_unused_port().expect("No free ports"), )) .unwrap(); - let config = LedgerServerConfig { + let admin_listen_uri = AdminUri::from_str(&format!( + "insecure-mca://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let config = LedgerRouterConfig { chain_id: "local".to_string(), ledger_db: db_full_path.to_path_buf(), watcher_db: watcher_dir, - admin_listen_uri: Default::default(), - client_listen_uri: client_uri.clone(), - client_responder_id: ResponderId::from_str(&client_uri.addr()).unwrap(), + admin_listen_uri: admin_listen_uri.clone(), + client_listen_uri: client_listen_uri.clone(), + client_responder_id: client_listen_uri + .responder_id() + .expect("Couldn't get responder ID for router"), + shard_uris: vec![], ias_spid: Default::default(), ias_api_key: 
Default::default(), client_auth_token_secret: None, client_auth_token_max_lifetime: Default::default(), + query_retries: 3, omap_capacity: OMAP_CAPACITY, }; @@ -135,24 +146,20 @@ fn fog_ledger_merkle_proofs_test(logger: Logger) { logger.clone(), ); - let ra_client = - AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); - let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); - let mut ledger_server = LedgerServer::new( + let ra_client = + AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); + let mut ledger_server = LedgerRouterServer::new( config, enclave, + ra_client, ledger.clone(), watcher.clone(), - ra_client, - SystemTimeProvider::default(), logger.clone(), ); - ledger_server - .start() - .expect("Failed starting ledger server"); + ledger_server.start(); // Make ledger enclave client let mut mr_signer_verifier = @@ -166,7 +173,7 @@ fn fog_ledger_merkle_proofs_test(logger: Logger) { let mut client = FogMerkleProofGrpcClient::new( "local".to_string(), - client_uri.clone(), + client_listen_uri.clone(), GRPC_RETRY_CONFIG, verifier.clone(), grpc_env.clone(), @@ -221,7 +228,7 @@ fn fog_ledger_merkle_proofs_test(logger: Logger) { // Check that wrong chain id results in an error let mut client = FogMerkleProofGrpcClient::new( "wrong".to_string(), - client_uri, + client_listen_uri, GRPC_RETRY_CONFIG, verifier, grpc_env, @@ -254,6 +261,7 @@ fn fog_ledger_merkle_proofs_test(logger: Logger) { panic!("Expected an error when chain-id is wrong"); } } + // grpcio detaches all its threads and does not join them :( // we opened a PR here: https://github.com/tikv/grpc-rs/pull/455 // in the meantime we can just sleep after grpcio env and all related @@ -267,8 +275,6 @@ fn fog_ledger_merkle_proofs_test(logger: Logger) { // a fog ledger server #[test_with_logger] fn fog_ledger_key_images_test(logger: Logger) { - let base_port = 3240; - let mut rng = RngType::from_seed([0u8; 32]); for block_version in BlockVersion::iterator() { @@ -338,51 +344,101 @@ fn fog_ledger_key_images_test(logger: Logger) { watcher.update_last_synced(&url1, 2).unwrap(); { - // Make LedgerServer - let client_uri = FogLedgerUri::from_str(&format!( + // Make Key Image Store + let store_uri = KeyImageStoreUri::from_str(&format!( + "insecure-key-image-store://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let store_admin_uri = AdminUri::from_str(&format!( + "insecure-mca://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let store_config = LedgerStoreConfig { + chain_id: "local".to_string(), + client_responder_id: store_uri + .responder_id() + .expect("Couldn't get responder ID for store"), + client_listen_uri: store_uri.clone(), + ledger_db: db_full_path.to_path_buf(), + watcher_db: watcher_dir.clone(), + ias_api_key: Default::default(), + ias_spid: Default::default(), + admin_listen_uri: Some(store_admin_uri), + client_auth_token_secret: None, + client_auth_token_max_lifetime: Default::default(), + omap_capacity: OMAP_CAPACITY, + sharding_strategy: ShardingStrategy::Epoch(EpochShardingStrategy::default()), + }; + let store_enclave = LedgerSgxEnclave::new( + get_enclave_path(mc_fog_ledger_enclave::ENCLAVE_FILE), + &store_config.client_responder_id, + OMAP_CAPACITY, + logger.clone(), + ); + let ra_client = + AttestClient::new(&store_config.ias_api_key).expect("Could not create IAS client"); + let mut store_server = KeyImageStoreServer::new_from_config( + store_config, + store_enclave, + 
ra_client, + ledger.clone(), + watcher.clone(), + EpochShardingStrategy::default(), + SystemTimeProvider::default(), + logger.clone(), + ); + + // Make Router Server + let client_listen_uri = FogLedgerUri::from_str(&format!( "insecure-fog-ledger://127.0.0.1:{}", - base_port + 7 + portpicker::pick_unused_port().expect("No free ports"), )) .unwrap(); - let config = LedgerServerConfig { + let admin_listen_uri = AdminUri::from_str(&format!( + "insecure-mca://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let router_config = LedgerRouterConfig { chain_id: "local".to_string(), ledger_db: db_full_path.to_path_buf(), watcher_db: watcher_dir, - admin_listen_uri: Default::default(), - client_listen_uri: client_uri.clone(), - client_responder_id: ResponderId::from_str(&client_uri.addr()).unwrap(), + admin_listen_uri: admin_listen_uri.clone(), + client_listen_uri: client_listen_uri.clone(), + shard_uris: vec![store_uri], + client_responder_id: client_listen_uri + .responder_id() + .expect("Couldn't get responder ID for router"), ias_spid: Default::default(), ias_api_key: Default::default(), client_auth_token_secret: None, client_auth_token_max_lifetime: Default::default(), + query_retries: 3, omap_capacity: OMAP_CAPACITY, }; let enclave = LedgerSgxEnclave::new( get_enclave_path(mc_fog_ledger_enclave::ENCLAVE_FILE), - &config.client_responder_id, + &router_config.client_responder_id, OMAP_CAPACITY, logger.clone(), ); let ra_client = - AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); - - let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); - - let mut ledger_server = LedgerServer::new( - config, + AttestClient::new(&router_config.ias_api_key).expect("Could not create IAS client"); + let mut router_server = LedgerRouterServer::new( + router_config, enclave, - ledger.clone(), - watcher, ra_client, - SystemTimeProvider::default(), + ledger.clone(), + watcher.clone(), logger.clone(), ); - ledger_server - .start() - .expect("Failed starting ledger server"); + store_server.start(); + router_server.start(); // Make ledger enclave client let mut mr_signer_verifier = @@ -394,26 +450,22 @@ fn fog_ledger_key_images_test(logger: Logger) { let mut verifier = Verifier::default(); verifier.mr_signer(mr_signer_verifier).debug(DEBUG_ENCLAVE); - let mut client = FogKeyImageGrpcClient::new( - String::default(), - client_uri, - GRPC_RETRY_CONFIG, - verifier, - grpc_env, - logger.clone(), - ); + let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); + let mut client = + LedgerGrpcClient::new(client_listen_uri, verifier, grpc_env, logger.clone()); // Check on key images - let mut response = client - .check_key_images(&[keys[0], keys[1], keys[3], keys[7], keys[19]]) - .expect("check_key_images failed"); + let mut response = + block_on(client.check_key_images(&[keys[0], keys[1], keys[3], keys[7], keys[19]])) + .expect("check_key_images failed"); let mut n = 1; // adding a delay to give fog ledger time to fully initialize while response.num_blocks != num_blocks { - response = client - .check_key_images(&[keys[0], keys[1], keys[3], keys[7], keys[19]]) - .expect("check_key_images failed"); + response = block_on( + client.check_key_images(&[keys[0], keys[1], keys[3], keys[7], keys[19]]), + ) + .expect("check_key_images failed"); sleep(Duration::from_secs(10)); // panic on the 20th time @@ -483,8 +535,6 @@ fn fog_ledger_key_images_test(logger: Logger) { // a fog ledger server #[test_with_logger] fn fog_ledger_blocks_api_test(logger: Logger) { - 
let base_port = 3250; - let mut rng = RngType::from_seed([0u8; 32]); let alice = AccountKey::random_with_fog(&mut rng); @@ -541,22 +591,31 @@ fn fog_ledger_blocks_api_test(logger: Logger) { { // Make LedgerServer - let client_uri = FogLedgerUri::from_str(&format!( + let client_listen_uri = FogLedgerUri::from_str(&format!( "insecure-fog-ledger://127.0.0.1:{}", - base_port + 7 + portpicker::pick_unused_port().expect("No free ports") )) .unwrap(); - let config = LedgerServerConfig { + let admin_listen_uri = AdminUri::from_str(&format!( + "insecure-mca://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let config = LedgerRouterConfig { chain_id: "local".to_string(), ledger_db: db_full_path.to_path_buf(), watcher_db: watcher_dir, - admin_listen_uri: Default::default(), - client_listen_uri: client_uri.clone(), - client_responder_id: ResponderId::from_str(&client_uri.addr()).unwrap(), + admin_listen_uri, + client_listen_uri: client_listen_uri.clone(), + client_responder_id: client_listen_uri + .responder_id() + .expect("Couldn't get responder ID for router"), + shard_uris: vec![], ias_spid: Default::default(), ias_api_key: Default::default(), client_auth_token_secret: None, client_auth_token_max_lifetime: Default::default(), + query_retries: 3, omap_capacity: OMAP_CAPACITY, }; @@ -567,28 +626,28 @@ fn fog_ledger_blocks_api_test(logger: Logger) { logger.clone(), ); - let ra_client = - AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); - let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); - let mut ledger_server = LedgerServer::new( + let ra_client = + AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); + let mut ledger_server = LedgerRouterServer::new( config, enclave, - ledger.clone(), - watcher, ra_client, - SystemTimeProvider::default(), + ledger.clone(), + watcher.clone(), logger.clone(), ); - ledger_server - .start() - .expect("Failed starting ledger server"); + ledger_server.start(); // Make unattested ledger client - let client = - FogUntrustedLedgerGrpcClient::new(client_uri, GRPC_RETRY_CONFIG, grpc_env, logger); + let client = FogUntrustedLedgerGrpcClient::new( + client_listen_uri, + GRPC_RETRY_CONFIG, + grpc_env, + logger, + ); // Try to get a block let queries = [0..1]; @@ -641,8 +700,6 @@ fn fog_ledger_blocks_api_test(logger: Logger) { // a fog ledger server #[test_with_logger] fn fog_ledger_untrusted_tx_out_api_test(logger: Logger) { - let base_port = 3260; - let mut rng = RngType::from_seed([0u8; 32]); let alice = AccountKey::random_with_fog(&mut rng); @@ -699,22 +756,31 @@ fn fog_ledger_untrusted_tx_out_api_test(logger: Logger) { { // Make LedgerServer - let client_uri = FogLedgerUri::from_str(&format!( + let client_listen_uri = FogLedgerUri::from_str(&format!( "insecure-fog-ledger://127.0.0.1:{}", - base_port + 7 + portpicker::pick_unused_port().expect("No free ports") )) .unwrap(); - let config = LedgerServerConfig { + let admin_listen_uri = AdminUri::from_str(&format!( + "insecure-mca://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let config = LedgerRouterConfig { chain_id: "local".to_string(), ledger_db: db_full_path.to_path_buf(), watcher_db: watcher_dir, - admin_listen_uri: Default::default(), - client_listen_uri: client_uri.clone(), - client_responder_id: ResponderId::from_str(&client_uri.addr()).unwrap(), + admin_listen_uri, + client_listen_uri: client_listen_uri.clone(), + client_responder_id: client_listen_uri + 
.responder_id() + .expect("Couldn't get responder ID for router"), + shard_uris: vec![], ias_spid: Default::default(), ias_api_key: Default::default(), client_auth_token_secret: None, client_auth_token_max_lifetime: Default::default(), + query_retries: 3, omap_capacity: OMAP_CAPACITY, }; @@ -725,35 +791,35 @@ fn fog_ledger_untrusted_tx_out_api_test(logger: Logger) { logger.clone(), ); - let ra_client = - AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); - let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); - let mut ledger_server = LedgerServer::new( + let ra_client = + AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); + let mut ledger_server = LedgerRouterServer::new( config, enclave, - ledger.clone(), - watcher, ra_client, - SystemTimeProvider::default(), + ledger.clone(), + watcher.clone(), logger.clone(), ); - ledger_server - .start() - .expect("Failed starting ledger server"); + ledger_server.start(); // Make unattested ledger client - let client = - FogUntrustedLedgerGrpcClient::new(client_uri, GRPC_RETRY_CONFIG, grpc_env, logger); + let client = FogUntrustedLedgerGrpcClient::new( + client_listen_uri, + GRPC_RETRY_CONFIG, + grpc_env, + logger, + ); // Get a tx_out that is actually in the ledger let real_tx_out0 = { ledger.get_tx_out_by_index(0).unwrap() }; // Try to get tx out records - let key = CompressedRistrettoPublic::try_from(&[0u8; 32]).expect("Could not construct key"); - let queries: Vec = vec![key, real_tx_out0.public_key]; + let queries: Vec = + vec![(&[0u8; 32]).try_into().unwrap(), real_tx_out0.public_key]; let result = client.get_tx_outs(queries).unwrap(); // Check that we got expected num_blocks value assert_eq!(result.num_blocks, 4); @@ -785,6 +851,272 @@ fn fog_ledger_untrusted_tx_out_api_test(logger: Logger) { sleep(Duration::from_millis(1000)); } +// Test that a fog ledger connection is able to check key images by hitting +// a fog ledger router using the unary API +#[test_with_logger] +fn fog_router_unary_key_image_test(logger: Logger) { + let mut rng = RngType::from_seed([0u8; 32]); + + for block_version in BlockVersion::iterator() { + let alice = AccountKey::random_with_fog(&mut rng); + + let recipients = vec![alice.default_subaddress()]; + + let keys: Vec = (0..20).map(|x| KeyImage::from(x as u64)).collect(); + + // Make LedgerDB + let ledger_dir = TempDir::new().expect("Could not get test_ledger tempdir"); + let db_full_path = ledger_dir.path(); + let mut ledger = recreate_ledger_db(db_full_path); + + // Make WatcherDB + let (mut watcher, watcher_dir) = setup_watcher_db(logger.clone()); + + // Populate ledger with some data + // Origin block cannot have key images + add_block_to_ledger( + block_version, + &mut ledger, + &recipients, + &[], + &mut rng, + &mut watcher, + ); + add_block_to_ledger( + block_version, + &mut ledger, + &recipients, + &keys[0..2], + &mut rng, + &mut watcher, + ); + add_block_to_ledger( + block_version, + &mut ledger, + &recipients, + &keys[3..6], + &mut rng, + &mut watcher, + ); + let num_blocks = add_block_to_ledger( + block_version, + &mut ledger, + &recipients, + &keys[6..9], + &mut rng, + &mut watcher, + ); + + // Populate watcher with Signature and Timestamp for block 1 + let url1 = Url::parse(TEST_URL).unwrap(); + let block1 = ledger.get_block(1).unwrap(); + let signing_key_a = Ed25519Pair::from_random(&mut rng); + let filename = String::from("00/00"); + let mut signed_block_a1 = + BlockSignature::from_block_and_keypair(&block1, &signing_key_a).unwrap(); + 
signed_block_a1.set_signed_at(1593798844); + watcher + .add_block_signature(&url1, 1, signed_block_a1, filename.clone()) + .unwrap(); + + // Update last synced to block 2, to indicate that this URL did not participate + // in consensus for block 2. + watcher.update_last_synced(&url1, 2).unwrap(); + + { + // Make Key Image Store + let store_uri = KeyImageStoreUri::from_str(&format!( + "insecure-key-image-store://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let store_admin_uri = AdminUri::from_str(&format!( + "insecure-mca://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let store_config = LedgerStoreConfig { + chain_id: "local".to_string(), + client_responder_id: store_uri + .responder_id() + .expect("Couldn't get responder ID for store"), + client_listen_uri: store_uri.clone(), + ledger_db: db_full_path.to_path_buf(), + watcher_db: watcher_dir.clone(), + ias_api_key: Default::default(), + ias_spid: Default::default(), + admin_listen_uri: Some(store_admin_uri), + client_auth_token_secret: None, + client_auth_token_max_lifetime: Default::default(), + omap_capacity: OMAP_CAPACITY, + sharding_strategy: ShardingStrategy::Epoch(EpochShardingStrategy::default()), + }; + let store_enclave = LedgerSgxEnclave::new( + get_enclave_path(mc_fog_ledger_enclave::ENCLAVE_FILE), + &store_config.client_responder_id, + OMAP_CAPACITY, + logger.clone(), + ); + let ra_client = + AttestClient::new(&store_config.ias_api_key).expect("Could not create IAS client"); + let mut store_server = KeyImageStoreServer::new_from_config( + store_config, + store_enclave, + ra_client, + ledger.clone(), + watcher.clone(), + EpochShardingStrategy::default(), + SystemTimeProvider::default(), + logger.clone(), + ); + + // Make Router Server + let router_client_listen_uri = FogLedgerUri::from_str(&format!( + "insecure-fog-ledger://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports"), + )) + .unwrap(); + let admin_listen_uri = AdminUri::from_str(&format!( + "insecure-mca://127.0.0.1:{}", + portpicker::pick_unused_port().expect("No free ports") + )) + .unwrap(); + let router_config = LedgerRouterConfig { + chain_id: "local".to_string(), + ledger_db: db_full_path.to_path_buf(), + watcher_db: watcher_dir, + admin_listen_uri: admin_listen_uri.clone(), + client_listen_uri: router_client_listen_uri.clone(), + client_responder_id: router_client_listen_uri + .responder_id() + .expect("Couldn't get responder ID for router"), + shard_uris: vec![store_uri], + ias_spid: Default::default(), + ias_api_key: Default::default(), + client_auth_token_secret: None, + client_auth_token_max_lifetime: Default::default(), + query_retries: 3, + omap_capacity: OMAP_CAPACITY, + }; + + let enclave = LedgerSgxEnclave::new( + get_enclave_path(mc_fog_ledger_enclave::ENCLAVE_FILE), + &router_config.client_responder_id, + OMAP_CAPACITY, + logger.clone(), + ); + + let ra_client = + AttestClient::new(&router_config.ias_api_key).expect("Could not create IAS client"); + let mut router_server = LedgerRouterServer::new( + router_config, + enclave, + ra_client, + ledger.clone(), + watcher.clone(), + logger.clone(), + ); + + store_server.start(); + router_server.start(); + + // Make ledger enclave client + let mut mr_signer_verifier = + MrSignerVerifier::from(mc_fog_ledger_enclave_measurement::sigstruct()); + mr_signer_verifier.allow_hardening_advisories( + mc_fog_ledger_enclave_measurement::HARDENING_ADVISORIES, + ); + + let mut verifier = Verifier::default(); + 
verifier.mr_signer(mr_signer_verifier).debug(DEBUG_ENCLAVE); + + let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); + let mut client = FogKeyImageGrpcClient::new( + String::default(), + router_client_listen_uri, + GRPC_RETRY_CONFIG, + verifier, + grpc_env, + logger.clone(), + ); + + // Check on key images + let mut response = client + .check_key_images(&[keys[0], keys[1], keys[3], keys[7], keys[19]]) + .expect("check_key_images failed"); + + let mut n = 1; + + while response.num_blocks != num_blocks { + response = client + .check_key_images(&[keys[0], keys[1], keys[3], keys[7], keys[19]]) + .expect("check_key_images failed"); + + // Ideally this should not require a sleep, but that's for a later PR. + sleep(Duration::from_secs(10)); + // panic on the 20th time + n += 1; // + if n > 20 { + panic!("Fog ledger not fully initialized"); + } + } + + // FIXME assert_eq!(response.num_txos, ...); + assert_eq!(response.results[0].key_image, keys[0]); + assert_eq!(response.results[0].status(), Ok(Some(1))); + assert_eq!( + response.results[0].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + assert_eq!(response.results[0].timestamp, 1); + + assert_eq!(response.results[1].key_image, keys[1]); + assert_eq!(response.results[1].status(), Ok(Some(1))); + assert_eq!( + response.results[1].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + assert_eq!(response.results[1].timestamp, 1); + + // Check a key_image for a block which will never have signatures & timestamps + assert_eq!(response.results[2].key_image, keys[3]); + assert_eq!(response.results[2].status(), Ok(Some(2))); // Spent in block 2 + assert_eq!( + response.results[2].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + assert_eq!(response.results[2].timestamp, 2); + + // Watcher has only synced 1 block, so timestamp should be behind + assert_eq!(response.results[3].key_image, keys[7]); + assert_eq!(response.results[3].status(), Ok(Some(3))); // Spent in block 3 + assert_eq!( + response.results[3].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + assert_eq!(response.results[3].timestamp, 3); + + // Check a key_image that has not been spent + assert_eq!(response.results[4].key_image, keys[19]); + assert_eq!(response.results[4].status(), Ok(None)); // Not spent + assert_eq!( + response.results[4].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + assert_eq!(response.results[4].timestamp, u64::MAX); + } + + // FIXME: Check a key_image that generates a DatabaseError - tough to generate + + // grpcio detaches all its threads and does not join them :( + // we opened a PR here: https://github.com/tikv/grpc-rs/pull/455 + // in the meantime we can just sleep after grpcio env and all related + // objects have been destroyed, and hope that those 6 threads see the + // shutdown requests within 1 second. 
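+ // Note: this test exercises the router through the legacy unary
+ // FogKeyImageGrpcClient, while fog_ledger_key_images_test above hits the
+ // same kind of router endpoint through the newer async LedgerGrpcClient;
+ // both should report identical key image results.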
+ sleep(Duration::from_millis(1000)); + } +} + // Infra /// Adds a block containing one txo for each provided recipient and returns new diff --git a/fog/ledger/server/tests/router_integration.rs b/fog/ledger/server/tests/router_integration.rs new file mode 100644 index 0000000000..e5b8fa2a71 --- /dev/null +++ b/fog/ledger/server/tests/router_integration.rs @@ -0,0 +1,600 @@ +// Copyright (c) 2018-2023 The MobileCoin Foundation + +use mc_account_keys::{AccountKey, PublicAddress}; +use mc_api::watcher::TimestampResultCode; +use mc_attest_net::{Client as AttestClient, RaClient}; +use mc_attest_verifier::{MrSignerVerifier, Verifier, DEBUG_ENCLAVE}; +use mc_blockchain_types::BlockVersion; +use mc_common::{ + logger, + logger::{log, Logger}, + time::SystemTimeProvider, +}; +use mc_fog_ledger_connection::{KeyImageResultExtension, LedgerGrpcClient}; +use mc_fog_ledger_enclave::LedgerSgxEnclave; +use mc_fog_ledger_server::{ + sharding_strategy::EpochShardingStrategy, KeyImageStoreServer, LedgerRouterConfig, + LedgerRouterServer, LedgerStoreConfig, ShardingStrategy, +}; +use mc_fog_ledger_test_infra::ShardProxyServer; +use mc_fog_test_infra::get_enclave_path; +use mc_fog_types::common::BlockRange; +use mc_fog_uri::{FogLedgerUri, KeyImageStoreUri}; +use mc_ledger_db::{test_utils::recreate_ledger_db, LedgerDB}; +use mc_rand::{CryptoRng, RngCore}; +use mc_transaction_core::{ring_signature::KeyImage, tokens::Mob, Amount, Token}; +use mc_util_test_helper::{RngType, SeedableRng}; +use mc_util_uri::{AdminUri, ConnectionUri}; +use mc_watcher::watcher_db::WatcherDB; +use rand::thread_rng; +use serde::{Deserialize, Serialize}; +use std::{ + collections::HashMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, + path::{Path, PathBuf}, + str::FromStr, + sync::Arc, +}; +use tempfile::TempDir; +use url::Url; + +const TEST_URL: &str = "http://www.my_url1.com"; +const CHAIN_ID: &str = "local"; + +fn setup_watcher_db(path: PathBuf, logger: Logger) -> WatcherDB { + let url = Url::parse(TEST_URL).unwrap(); + + // create does not open + WatcherDB::create(&path).unwrap(); + WatcherDB::open_rw(&path, &[url], logger).unwrap() +} + +fn create_store_config( + store_uri: &KeyImageStoreUri, + block_range: BlockRange, + omap_capacity: u64, +) -> LedgerStoreConfig { + LedgerStoreConfig { + chain_id: CHAIN_ID.to_string(), + client_responder_id: store_uri + .responder_id() + .expect("Couldn't get responder ID for store"), + client_listen_uri: store_uri.clone(), + ledger_db: Default::default(), + watcher_db: Default::default(), + ias_api_key: Default::default(), + ias_spid: Default::default(), + admin_listen_uri: None, + client_auth_token_secret: None, + client_auth_token_max_lifetime: Default::default(), + omap_capacity, + sharding_strategy: ShardingStrategy::Epoch(EpochShardingStrategy::new(block_range)), + } +} + +fn add_block_to_ledger( + ledger_db: &mut LedgerDB, + recipients: &[PublicAddress], + key_images: &[KeyImage], + rng: &mut (impl CryptoRng + RngCore), + watcher: &mut WatcherDB, +) -> u64 { + let amount = Amount::new(10, Mob::ID); + let block_data = mc_ledger_db::test_utils::add_block_to_ledger( + ledger_db, + BlockVersion::MAX, + recipients, + amount, + key_images, + rng, + ) + .expect("failed to add block"); + let block_index = block_data.block().index; + + let signature = block_data.signature().expect("missing signature"); + for src_url in watcher.get_config_urls().unwrap().iter() { + watcher + .add_block_signature( + src_url, + block_index, + signature.clone(), + format!("00/{block_index}"), + ) + .expect("Could not add 
block signature"); + } + + block_index + 1 +} + +fn populate_ledger(blocks_config: &BlockConfig, ledger: &mut LedgerDB, watcher: &mut WatcherDB) { + let mut rng = thread_rng(); + + let alice = AccountKey::random_with_fog(&mut rng); + let recipients = vec![alice.default_subaddress()]; + // Origin block cannot have key images + add_block_to_ledger(ledger, &recipients, &[], &mut rng, watcher); + + for block in blocks_config { + let recipients: Vec<_> = block.keys().cloned().collect(); + let key_images: Vec<_> = block.values().flat_map(|x| x.clone()).collect(); + + add_block_to_ledger(ledger, &recipients, &key_images, &mut rng, watcher); + } +} + +fn create_store( + test_config: &StoreConfig, + blocks_config: &BlockConfig, + block_range: BlockRange, + watcher_db_path: &Path, + ledger_db_path: &Path, + logger: Logger, +) -> KeyImageStoreServer { + let uri = KeyImageStoreUri::from_str(&format!( + "insecure-key-image-store://{}", + test_config.address + )) + .unwrap(); + let block_range = test_config + .block_range + .as_ref() + .unwrap_or(&block_range) + .clone(); + let config = create_store_config(&uri, block_range.clone(), test_config.omap_capacity); + let enclave = LedgerSgxEnclave::new( + get_enclave_path(mc_fog_ledger_enclave::ENCLAVE_FILE), + &config.client_responder_id, + config.omap_capacity, + logger.clone(), + ); + + let mut ledger = recreate_ledger_db(ledger_db_path); + let mut watcher = setup_watcher_db(watcher_db_path.to_path_buf(), logger.clone()); + + populate_ledger(blocks_config, &mut ledger, &mut watcher); + + let ra_client = AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); + let mut store = KeyImageStoreServer::new_from_config( + config, + enclave, + ra_client, + ledger, + watcher, + EpochShardingStrategy::new(block_range), + SystemTimeProvider::default(), + logger, + ); + store.start(); + + store +} + +fn create_shard(config: &ShardConfig, _logger: Logger) -> ShardProxyServer { + ShardProxyServer::new( + &config.address, + config + .stores + .iter() + .map(|x| x.address.to_string()) + .collect(), + ) +} + +fn create_router( + test_config: &TestEnvironmentConfig, + blocks_config: &BlockConfig, + watcher_db_path: &Path, + ledger_db_path: &Path, + logger: Logger, +) -> LedgerRouterServer { + let uri = FogLedgerUri::from_str(&format!( + "insecure-fog-ledger://{}", + test_config.router_address + )) + .unwrap(); + let admin_uri = AdminUri::from_str(&format!( + "insecure-mca://{}", + test_config.router_admin_address + )) + .unwrap(); + + let mut ledger = recreate_ledger_db(ledger_db_path); + let mut watcher = setup_watcher_db(watcher_db_path.to_path_buf(), logger.clone()); + + populate_ledger(blocks_config, &mut ledger, &mut watcher); + + let config = LedgerRouterConfig { + chain_id: "local".to_string(), + ledger_db: ledger_db_path.to_path_buf(), + watcher_db: watcher_db_path.to_path_buf(), + shard_uris: test_config + .shards + .iter() + .map(|x| { + KeyImageStoreUri::from_str(&format!("insecure-key-image-store://{}", x.address)) + .unwrap() + }) + .collect(), + client_responder_id: uri + .responder_id() + .expect("Couldn't get responder ID for router"), + client_listen_uri: uri, + admin_listen_uri: admin_uri, + ias_spid: Default::default(), + ias_api_key: Default::default(), + client_auth_token_secret: None, + client_auth_token_max_lifetime: Default::default(), + query_retries: 3, + omap_capacity: test_config.omap_capacity, + }; + + let enclave = LedgerSgxEnclave::new( + get_enclave_path(mc_fog_ledger_enclave::ENCLAVE_FILE), + 
&config.client_responder_id, + config.omap_capacity, + logger.clone(), + ); + + let ra_client = AttestClient::new(&config.ias_api_key).expect("Could not create IAS client"); + + let mut router = LedgerRouterServer::new(config, enclave, ra_client, ledger, watcher, logger); + router.start(); + router +} + +fn create_router_client( + config: &TestEnvironmentConfig, + grpc_env: Arc, + logger: Logger, +) -> LedgerGrpcClient { + let uri = FogLedgerUri::from_str(&format!("insecure-fog-ledger://{}", config.router_address)) + .unwrap(); + + let mut mr_signer_verifier = + MrSignerVerifier::from(mc_fog_ledger_enclave_measurement::sigstruct()); + mr_signer_verifier + .allow_hardening_advisories(mc_fog_ledger_enclave_measurement::HARDENING_ADVISORIES); + let mut verifier = Verifier::default(); + verifier.mr_signer(mr_signer_verifier).debug(DEBUG_ENCLAVE); + + LedgerGrpcClient::new(uri, verifier, grpc_env, logger) +} + +fn create_env( + config: TestEnvironmentConfig, + blocks_config: BlockConfig, + grpc_env: Arc, + logger: Logger, +) -> TestEnvironment { + let mut shards = vec![]; + let mut stores = vec![]; + let mut tempdirs = vec![]; + for shard in config.shards.iter() { + for store in shard.stores.iter() { + let watcher_db_dir = + TempDir::new().expect("Couldn't create temporary path for watcher DB"); + let ledger_db_dir = + TempDir::new().expect("Couldn't create temporary path for ledger DB"); + stores.push(create_store( + store, + &blocks_config, + shard.block_range.clone(), + watcher_db_dir.path(), + ledger_db_dir.path(), + logger.clone(), + )); + tempdirs.push(watcher_db_dir); + tempdirs.push(ledger_db_dir); + } + + shards.push(create_shard(shard, logger.clone())); + } + + let watcher_db_dir = TempDir::new().expect("Couldn't create temporary path for watcher DB"); + let ledger_db_dir = TempDir::new().expect("Couldn't create temporary path for ledger DB"); + let router = create_router( + &config, + &blocks_config, + watcher_db_dir.path(), + ledger_db_dir.path(), + logger.clone(), + ); + tempdirs.push(watcher_db_dir); + tempdirs.push(ledger_db_dir); + + let router_client = create_router_client(&config, grpc_env, logger); + + TestEnvironment { + stores, + shards, + _router: router, + router_client, + _tempdirs: tempdirs, + } +} + +struct TestEnvironment { + router_client: LedgerGrpcClient, + _router: LedgerRouterServer, + shards: Vec, + stores: Vec>, + _tempdirs: Vec, +} + +impl Drop for TestEnvironment { + fn drop(&mut self) { + for shard in &mut self.shards { + tokio::task::block_in_place(move || { + tokio::runtime::Handle::current().block_on(async move { + shard.stop().await; + }) + }); + } + for store in &mut self.stores { + store.stop(); + } + } +} + +#[derive(Serialize, Deserialize)] +struct TestEnvironmentConfig { + router_address: SocketAddr, + router_admin_address: SocketAddr, + shards: Vec, + omap_capacity: u64, +} + +#[derive(Serialize, Deserialize)] +struct ShardConfig { + address: SocketAddr, + block_range: BlockRange, + stores: Vec, +} + +#[derive(Serialize, Deserialize)] +struct StoreConfig { + address: SocketAddr, + block_range: Option, + omap_capacity: u64, +} + +type BlockConfig = Vec>>; + +fn free_sockaddr() -> SocketAddr { + let port = portpicker::pick_unused_port().unwrap(); + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), port) +} + +#[tokio::test(flavor = "multi_thread")] +async fn smoke_test() { + let logger = logger::create_test_logger("smoke_test".to_string()); + log::info!(logger, "test"); + // Three shards, three stores each, correct config, each stores three 
blocks, + // each has three users with three keys each + let num_shards = 3; + let stores_per_shard = 3; + let blocks_per_shard = 3; + let mut rng = RngType::from_seed([0u8; 32]); + let mut shards_config = vec![]; + for i in 0..num_shards { + let mut stores_config = vec![]; + for _ in 0..stores_per_shard { + let store = StoreConfig { + address: free_sockaddr(), + block_range: None, + omap_capacity: 1000, + }; + stores_config.push(store); + } + let shard = ShardConfig { + address: free_sockaddr(), + // the 1-block offset is because block 0 cannot contain key images + block_range: BlockRange::new_from_length((i * blocks_per_shard) + 1, blocks_per_shard), + stores: stores_config, + }; + shards_config.push(shard); + } + let config = TestEnvironmentConfig { + router_address: free_sockaddr(), + router_admin_address: free_sockaddr(), + shards: shards_config, + omap_capacity: 1000, + }; + + let mut blocks_config = vec![]; + let mut key_index = 0; + let num_blocks = blocks_per_shard * num_shards; + let users_per_block = 3; + let keys_per_user = 3; + for _ in 0..num_blocks { + let mut block = HashMap::new(); + for _ in 0..users_per_block { + let account = AccountKey::random_with_fog(&mut rng); + let mut keys = vec![]; + for _ in 0..keys_per_user { + keys.push(KeyImage::from(key_index)); + key_index += 1; + } + block.insert(account.default_subaddress(), keys); + } + blocks_config.push(block); + } + + let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); + + let mut test_environment = create_env(config, blocks_config, grpc_env, logger.clone()); + + // Check that we can get all the key images from each shard + let keys_per_block = users_per_block * keys_per_user; + for i in 0..key_index { + let key = KeyImage::from(i); + let response = test_environment + .router_client + .check_key_images(&[key]) + .await + .expect("check_key_images failed"); + assert_eq!(response.results.len(), 1); + assert_eq!(response.results[0].key_image, key); + assert_eq!( + response.results[0].status(), + Ok(Some((i / keys_per_block) + 1)) + ); + assert_eq!( + response.results[0].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + } + + // Grab them all at once + let keys: Vec<_> = (0..key_index).map(KeyImage::from).collect(); + let response = test_environment + .router_client + .check_key_images(&keys) + .await + .expect("check_key_images failed"); + assert_eq!(response.results.len(), key_index as usize); + for i in 0..key_index { + let key = KeyImage::from(i); + assert_eq!(response.results[i as usize].key_image, key); + assert_eq!( + response.results[i as usize].status(), + Ok(Some((i / keys_per_block) + 1)) + ); + assert_eq!( + response.results[i as usize].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + } + + // Check that an unspent key image is unspent + let key = KeyImage::from(126u64); + let response = test_environment + .router_client + .check_key_images(&[key]) + .await + .expect("check_key_images failed"); + assert_eq!(response.results.len(), 1); + assert_eq!(response.results[0].key_image, key); + assert_eq!(response.results[0].status(), Ok(None)); // Not spent + assert_eq!( + response.results[0].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); +} + +#[tokio::test(flavor = "multi_thread")] +async fn overlapping_stores() { + let logger = logger::create_test_logger("overlapping_stores".to_string()); + log::info!(logger, "test"); + // Three shards, three stores each, correct config, each stores three blocks, + // each has three users with 
three keys each - but the blocks overlap (so + // total of 5 blocks) + let num_shards = 3; + let stores_per_shard = 3; + let blocks_per_shard = 3; + let mut rng = RngType::from_seed([0u8; 32]); + let mut shards_config = vec![]; + for i in 0..num_shards { + let mut stores_config = vec![]; + for _ in 0..stores_per_shard { + let store = StoreConfig { + address: free_sockaddr(), + block_range: None, + omap_capacity: 1000, + }; + stores_config.push(store); + } + let shard = ShardConfig { + address: free_sockaddr(), + block_range: BlockRange::new_from_length(i + 1, blocks_per_shard), + stores: stores_config, + }; + shards_config.push(shard); + } + let config = TestEnvironmentConfig { + router_address: free_sockaddr(), + router_admin_address: free_sockaddr(), + shards: shards_config, + omap_capacity: 1000, + }; + + let mut blocks_config = vec![]; + let mut key_index = 0; + let num_blocks = 5; + let users_per_block = 3; + let keys_per_user = 3; + for _ in 0..num_blocks { + let mut block = HashMap::new(); + for _ in 0..users_per_block { + let account = AccountKey::random_with_fog(&mut rng); + let mut keys = vec![]; + for _ in 0..keys_per_user { + keys.push(KeyImage::from(key_index)); + key_index += 1; + } + block.insert(account.default_subaddress(), keys); + } + blocks_config.push(block); + } + + let grpc_env = Arc::new(grpcio::EnvBuilder::new().build()); + + let mut test_environment = create_env(config, blocks_config, grpc_env, logger.clone()); + + // Check that we can get all the key images from each shard + let keys_per_block = users_per_block * keys_per_user; + for i in 0..key_index { + let key = KeyImage::from(i); + let response = test_environment + .router_client + .check_key_images(&[key]) + .await + .expect("check_key_images failed"); + assert_eq!(response.results.len(), 1); + assert_eq!(response.results[0].key_image, key); + assert_eq!( + response.results[0].status(), + Ok(Some((i / keys_per_block) + 1)) + ); + assert_eq!( + response.results[0].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + } + + // Grab them all at once + let keys: Vec<_> = (0..key_index).map(KeyImage::from).collect(); + let response = test_environment + .router_client + .check_key_images(&keys) + .await + .expect("check_key_images failed"); + assert_eq!(response.results.len(), key_index as usize); + for i in 0..key_index { + let key = KeyImage::from(i); + assert_eq!(response.results[i as usize].key_image, key); + assert_eq!( + response.results[i as usize].status(), + Ok(Some((i / keys_per_block) + 1)) + ); + assert_eq!( + response.results[i as usize].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); + } + + // Check that an unspent key image is unspent + let key = KeyImage::from(126u64); + let response = test_environment + .router_client + .check_key_images(&[key]) + .await + .expect("check_key_images failed"); + assert_eq!(response.results.len(), 1); + assert_eq!(response.results[0].key_image, key); + assert_eq!(response.results[0].status(), Ok(None)); // Not spent + assert_eq!( + response.results[0].timestamp_result_code, + TimestampResultCode::TimestampFound as u32 + ); +} diff --git a/fog/ledger/server/tests/store.rs b/fog/ledger/server/tests/store.rs new file mode 100644 index 0000000000..451a7c86f4 --- /dev/null +++ b/fog/ledger/server/tests/store.rs @@ -0,0 +1,331 @@ +// Copyright (c) 2018-2023 The MobileCoin Foundation + +use std::{ + collections::BTreeMap, + path::PathBuf, + str::FromStr, + sync::{Arc, Mutex}, +}; + +use mc_attest_ake::{AuthResponseInput, 
ClientInitiate, Start, Transition}; +use mc_attest_api::attest; +use mc_attest_enclave_api::{ClientSession, EnclaveMessage, NonceSession}; +use mc_attest_net::{Client as AttestClient, RaClient}; +use mc_attest_verifier::Verifier; +use mc_blockchain_types::MAX_BLOCK_VERSION; +use mc_common::{ + logger::{test_with_logger, Logger}, + ResponderId, +}; +use mc_crypto_keys::X25519; +use mc_fog_ledger_enclave::{ + CheckKeyImagesResponse, KeyImageData, LedgerEnclave, LedgerSgxEnclave, ENCLAVE_FILE, +}; +use mc_fog_ledger_enclave_api::UntrustedKeyImageQueryResponse; +use mc_fog_ledger_server::{ + sharding_strategy::EpochShardingStrategy, DbPollSharedState, KeyImageService, + KeyImageStoreServer, LedgerStoreConfig, ShardingStrategy, +}; +use mc_fog_types::ledger::{CheckKeyImagesRequest, KeyImageQuery}; +use mc_fog_uri::{ConnectionUri, KeyImageStoreScheme, KeyImageStoreUri}; +use mc_ledger_db::{test_utils::recreate_ledger_db, LedgerDB}; +use mc_rand::{CryptoRng, RngCore}; +use mc_util_grpc::AnonymousAuthenticator; +use mc_util_metrics::{IntGauge, OpMetrics}; +use mc_util_test_helper::{Rng, RngType, SeedableRng}; +use mc_util_uri::UriScheme; +use mc_watcher::watcher_db::WatcherDB; + +use aes_gcm::Aes256Gcm; +use portpicker::pick_unused_port; +use sha2::Sha512; +use tempfile::TempDir; +use url::Url; + +fn uri_for_test(port: u16) -> KeyImageStoreUri { + // If a load-balancer were set up in the middle here + // this might need to be changed to + // {KeyImageStoreScheme::SCHEME_INSECURE}://localhost:1234/? + // responder-id={test_name} + let name = format!( + "{}://localhost:{}", + KeyImageStoreScheme::SCHEME_INSECURE, + port + ); + KeyImageStoreUri::from_str(&name) + .expect("Could not create a URI for a key-image store test using localhost.") +} + +pub struct TestingContext { + pub enclave: LedgerSgxEnclave, + pub ledger: LedgerDB, + pub responder_id: ResponderId, + pub rng: R, + pub store_config: LedgerStoreConfig, + pub tempdir: TempDir, + pub tx_source_url: Url, + pub watcher: WatcherDB, + pub watcher_path: TempDir, +} + +impl TestingContext { + pub fn new( + test_name: impl AsRef, + logger: Logger, + port: u16, + omap_capacity: u64, + rng: R, + ) -> Self { + // Set up our directories. + let tempdir = TempDir::new().expect("Could not produce test_ledger tempdir"); + let test_path = PathBuf::from(tempdir.path()); + let user_keys_path = test_path.join("keys"); + std::fs::create_dir_all(user_keys_path).expect("Failed creating user keys directory"); + + let test_uri = uri_for_test(port); + // This ID needs to match the host:port clients use in their URI when + // referencing the host node. + let responder_id = test_uri.responder_id().expect("Test URI is invalid"); + + let enclave_path = std::env::current_exe() + .expect("Could not get the path of our executable") + // The test ends up in target/debug/deps/ + // rather than just target/debug/. So, + // we need the parent directory. + .parent() + .expect("Failed to get parent of enclave path.") + .with_file_name(ENCLAVE_FILE); + + let enclave = + LedgerSgxEnclave::new(enclave_path, &responder_id, omap_capacity, logger.clone()); + + // Make LedgerDB + let ledger_path = test_path.join("fog_ledger"); + let ledger = recreate_ledger_db(ledger_path.as_path()); + + // Set up wallet db. 
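+ // The URL below is registered as the only source URL of the WatcherDB that
+ // gets created here; the key image store reads block signatures and
+ // timestamps from this watcher, not from an actual wallet database.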
+        let test_url_name = format!("http://{}.wallet.test.test", test_name.as_ref());
+        let url = Url::parse(&test_url_name).expect("Failed to parse test url as a Url struct.");
+
+        let db_tmp = TempDir::new().expect("Could not make tempdir for wallet db");
+        WatcherDB::create(db_tmp.path()).expect("Could not create WatcherDB.");
+        let watcher = WatcherDB::open_rw(db_tmp.path(), &[url.clone()], logger)
+            .expect("Failed to open WatcherDB.");
+
+        let config = LedgerStoreConfig {
+            chain_id: test_name.as_ref().to_string(),
+            client_responder_id: responder_id.clone(),
+            client_listen_uri: test_uri,
+            ledger_db: ledger_path,
+            watcher_db: PathBuf::from(db_tmp.path()),
+            ias_api_key: Default::default(),
+            ias_spid: Default::default(),
+            admin_listen_uri: Default::default(),
+            client_auth_token_secret: None,
+            client_auth_token_max_lifetime: Default::default(),
+            omap_capacity,
+            sharding_strategy: ShardingStrategy::Epoch(EpochShardingStrategy::default()),
+        };
+
+        Self {
+            enclave,
+            ledger,
+            responder_id,
+            rng,
+            tempdir,
+            tx_source_url: url,
+            store_config: config,
+            watcher,
+            watcher_path: db_tmp,
+        }
+    }
+}
+
+lazy_static::lazy_static! {
+    pub static ref TEST_OP_COUNTERS: OpMetrics = OpMetrics::new_and_registered("consensus_service");
+}
+
+lazy_static::lazy_static! {
+    pub static ref TEST_ENCLAVE_REPORT_TIMESTAMP: IntGauge = TEST_OP_COUNTERS.gauge("enclave_report_timestamp");
+}
+
+#[test_with_logger]
+pub fn direct_key_image_store_check(logger: Logger) {
+    const TEST_NAME: &str = "direct_key_image_store_check";
+    const OMAP_CAPACITY: u64 = 768;
+
+    let port = pick_unused_port().expect("No free ports");
+
+    let rng = RngType::from_entropy();
+    let TestingContext {
+        enclave,
+        ledger,
+        responder_id,
+        mut rng,
+        tempdir: _tempdir,
+        tx_source_url: _tx_source_url,
+        watcher,
+        store_config,
+        watcher_path: _watcher_path,
+    } = TestingContext::new(TEST_NAME, logger.clone(), port, OMAP_CAPACITY, rng);
+
+    let shared_state = Arc::new(Mutex::new(DbPollSharedState::default()));
+
+    let client_listen_uri = store_config.client_listen_uri.clone();
+    let store_service = KeyImageService::new(
+        client_listen_uri.clone(),
+        ledger,
+        watcher,
+        enclave.clone(), // LedgerSgxEnclave is an Arc internally
+        shared_state.clone(),
+        Arc::new(AnonymousAuthenticator::default()),
+        logger.clone(),
+    );
+
+    // Set up IAS verification
+    // This will be a SimClient in testing contexts.
+    let ias_client =
+        AttestClient::new(&store_config.ias_api_key).expect("Could not create IAS client");
+    let mut store_server = KeyImageStoreServer::new_from_service(
+        store_service,
+        client_listen_uri,
+        enclave.clone(),
+        ias_client,
+        store_config.ias_spid,
+        EpochShardingStrategy::default(),
+        logger,
+    );
+    store_server.start();
+
+    // Make GRPC client for sending requests.
+
+    // Get the enclave to generate an auth request.
+    let client_auth_request = enclave
+        .ledger_store_init(responder_id.clone())
+        .expect("Could not initialize ledger store on the enclave.");
+    // Submit auth request and wait for the response.
+    let (auth_response, _router_to_store_session) = enclave
+        .frontend_accept(client_auth_request)
+        .expect("frontend_accept() failed.");
+    // Finish the enclave's handshake with itself.
+    enclave
+        .ledger_store_connect(responder_id.clone(), auth_response)
+        .expect("Failed to complete the connection to a fog ledger store.");
+
+    // Generate a dummy key image we're going to check against.
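+    // (Note: the block_index and timestamp below are arbitrary; the enclave only
+    // needs to record the key image as spent, which is what the collated query
+    // response at the end of this test asserts.)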
+    let mut test_key_image_bytes: [u8; 32] = [0u8; 32];
+    rng.fill(&mut test_key_image_bytes);
+    let test_key_image = KeyImageData {
+        key_image: test_key_image_bytes.try_into().unwrap(),
+        block_index: 1,
+        timestamp: 255,
+    };
+    enclave
+        .add_key_image_data(vec![test_key_image])
+        .expect("Error adding key image data to the enclave.");
+
+    // Set up the client's end of the encrypted connection.
+    let initiator = Start::new(responder_id.to_string());
+
+    let init_input = ClientInitiate::<X25519, Aes256Gcm, Sha512>::default();
+    let (initiator, auth_request_output) = initiator
+        .try_next(&mut rng, init_input)
+        .expect("Could not encrypt auth message.");
+
+    // Authenticate our "client" with the server.
+    let auth_message = attest::AuthMessage::from(auth_request_output);
+    let (client_auth_response, client_session) = enclave
+        .client_accept(auth_message.into())
+        .expect("Unable to connect a dummy \"client\" connection to the enclave.");
+
+    // We will need to double-convert, ClientAuthResponse -> AuthMessage ->
+    // AuthResponseOutput
+    let auth_message = attest::AuthMessage::from(client_auth_response);
+    // Initiator accepts responder's message.
+    let auth_response_event = AuthResponseInput::new(auth_message.into(), Verifier::default());
+    // Should be a valid noise connection at this point.
+    let (mut noise_connection, _verification_report) = initiator
+        .try_next(&mut rng, auth_response_event)
+        .expect("Could not get a noise connection and verification report from the initiator.");
+
+    // Construct our request.
+    let key_images_request = CheckKeyImagesRequest {
+        queries: vec![KeyImageQuery {
+            key_image: test_key_image.key_image,
+            start_block: 1,
+        }],
+    };
+    // Protobuf-encoded plaintext.
+    let message_encoded = mc_util_serial::encode(&key_images_request);
+    let ciphertext = noise_connection
+        .encrypt(&[], &message_encoded)
+        .expect("Failed to encrypt request from the client to the router.");
+    let msg: EnclaveMessage<ClientSession> = EnclaveMessage {
+        aad: vec![],
+        channel_id: client_session,
+        data: ciphertext,
+    };
+
+    // Decrypt and seal
+    let sealed_query = enclave
+        .decrypt_and_seal_query(msg)
+        .expect("Unable to decrypt and seal client message.");
+
+    let mut multi_query = enclave
+        .create_multi_key_image_store_query_data(sealed_query.clone())
+        .expect("Could not create multi key image store query data.");
+
+    let query = multi_query
+        .pop()
+        .expect("Query should have had one message");
+    println!("Nonce session on message is {:?}", query.channel_id);
+
+    // Get an untrusted query
+    let (
+        highest_processed_block_count,
+        last_known_block_cumulative_txo_count,
+        latest_block_version,
+    ) = {
+        let shared_state = shared_state.lock().expect("mutex poisoned");
+        (
+            shared_state.highest_processed_block_count,
+            shared_state.last_known_block_cumulative_txo_count,
+            shared_state.latest_block_version,
+        )
+    };
+
+    let untrusted_kiqr = UntrustedKeyImageQueryResponse {
+        highest_processed_block_count,
+        last_known_block_cumulative_txo_count,
+        latest_block_version,
+        max_block_version: latest_block_version.max(*MAX_BLOCK_VERSION),
+    };
+
+    let result = enclave
+        .check_key_image_store(query, untrusted_kiqr)
+        .expect("Checking key image store enclave failed.");
+
+    let responses_btree: BTreeMap<ResponderId, EnclaveMessage<NonceSession>> =
+        BTreeMap::from([(responder_id, result)]);
+
+    let client_response = enclave
+        .collate_shard_query_responses(sealed_query, responses_btree)
+        .expect("Error in collate_shard_query_responses().");
+
+    let plaintext_bytes = noise_connection
+        .decrypt(&client_response.aad, &client_response.data)
+        .expect("Could not decrypt response to client.");
+
+    let done_response: CheckKeyImagesResponse =
+        mc_util_serial::decode(&plaintext_bytes).expect("Failed to decode CheckKeyImagesResponse.");
+    assert_eq!(done_response.results.len(), 1);
+
+    let test_results = done_response
+        .results
+        .into_iter()
+        .map(|result| (result.key_image, result.key_image_result_code))
+        .collect::<Vec<_>>();
+
+    // The key image result code for a spent key image is 1.
+    assert_eq!(test_results, &[(test_key_image.key_image, 1)]);
+}
diff --git a/fog/ledger/test_infra/Cargo.toml b/fog/ledger/test_infra/Cargo.toml
index 213225953d..b9039e8807 100644
--- a/fog/ledger/test_infra/Cargo.toml
+++ b/fog/ledger/test_infra/Cargo.toml
@@ -22,3 +22,10 @@ mc-transaction-core = { path = "../../../transaction/core" }
 mc-fog-ledger-enclave = { path = "../enclave" }
 mc-fog-ledger-enclave-api = { path = "../enclave/api" }
 mc-fog-types = { path = "../../types" }
+
+# third party
+http = "0.2"
+hyper = { version = "0.14", features = ["full"] }
+rand = "0.8"
+tokio = { version = "1", features = ["full"] }
+
diff --git a/fog/ledger/test_infra/src/lib.rs b/fog/ledger/test_infra/src/lib.rs
index 2fdfaa10db..c4e5d59fde 100644
--- a/fog/ledger/test_infra/src/lib.rs
+++ b/fog/ledger/test_infra/src/lib.rs
@@ -2,8 +2,17 @@
 //! Functionality for mocking and testing components in the ledger server
 
+use http::Uri;
+use hyper::{
+    client::HttpConnector,
+    service::{make_service_fn, service_fn},
+    Body, Client, Request, Response, Server,
+};
 use mc_attest_core::{IasNonce, Quote, QuoteNonce, Report, TargetInfo, VerificationReport};
-use mc_attest_enclave_api::{ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage};
+use mc_attest_enclave_api::{
+    ClientAuthRequest, ClientAuthResponse, ClientSession, EnclaveMessage, NonceAuthRequest,
+    NonceAuthResponse, NonceSession,
+};
 use mc_blockchain_types::{
     Block, BlockContents, BlockData, BlockIndex, BlockMetadata, BlockSignature,
 };
@@ -21,6 +30,9 @@ use mc_transaction_core::{
     tx::{TxOut, TxOutMembershipElement, TxOutMembershipProof},
     TokenId,
 };
+use rand::seq::SliceRandom;
+use std::{net::SocketAddr, sync::Arc};
+use tokio::{sync::oneshot, task::JoinHandle};
 
 #[derive(Default, Clone)]
 pub struct MockEnclave {}
@@ -73,7 +85,7 @@ impl LedgerEnclave for MockEnclave {
     fn check_key_images(
         &self,
         _msg: EnclaveMessage<ClientSession>,
-        _untrusted_keyimagequery_response: UntrustedKeyImageQueryResponse,
+        _response: UntrustedKeyImageQueryResponse,
     ) -> Result<Vec<u8>, mc_fog_ledger_enclave::Error> {
         unimplemented!()
     }
@@ -84,6 +96,58 @@ impl LedgerEnclave for MockEnclave {
     ) -> Result<(), mc_fog_ledger_enclave::Error> {
         unimplemented!()
     }
+
+    fn ledger_store_init(&self, _ledger_store_id: ResponderId) -> EnclaveResult<NonceAuthRequest> {
+        unimplemented!()
+    }
+
+    fn ledger_store_connect(
+        &self,
+        _ledger_store_id: ResponderId,
+        _ledger_store_auth_response: NonceAuthResponse,
+    ) -> EnclaveResult<()> {
+        unimplemented!()
+    }
+
+    fn decrypt_and_seal_query(
+        &self,
+        _client_query: EnclaveMessage<ClientSession>,
+    ) -> EnclaveResult<mc_attest_enclave_api::SealedClientMessage> {
+        unimplemented!()
+    }
+
+    fn create_multi_key_image_store_query_data(
+        &self,
+        _sealed_query: mc_attest_enclave_api::SealedClientMessage,
+    ) -> EnclaveResult<Vec<EnclaveMessage<NonceSession>>> {
+        unimplemented!()
+    }
+
+    fn collate_shard_query_responses(
+        &self,
+        _sealed_query: mc_attest_enclave_api::SealedClientMessage,
+        _shard_query_responses: std::collections::BTreeMap<
+            ResponderId,
+            EnclaveMessage<NonceSession>,
+        >,
+    ) -> Result<EnclaveMessage<ClientSession>, mc_fog_ledger_enclave::Error> {
+        unimplemented!()
+    }
+
+    fn check_key_image_store(
+        &self,
+        _msg: EnclaveMessage<NonceSession>,
+        _response: UntrustedKeyImageQueryResponse,
+    ) -> EnclaveResult<EnclaveMessage<NonceSession>> {
+        unimplemented!()
+    }
+
+    fn frontend_accept(
+        &self,
+        _auth_message: NonceAuthRequest,
+    ) -> EnclaveResult<(NonceAuthResponse, NonceSession)> {
+        unimplemented!()
+    }
 }
 
 #[derive(Clone, Default)]
@@ -231,3 +295,71 @@ impl Ledger for MockLedger {
         unimplemented!()
     }
 }
+
+pub struct ShardProxyServer {
+    server_handle: Option<JoinHandle<Result<(), hyper::Error>>>,
+    stop_channel: Option<oneshot::Sender<()>>,
+}
+
+impl ShardProxyServer {
+    async fn route(
+        request: Request<Body>,
+        client: Arc<Client<HttpConnector>>,
+        endpoints: Arc<Vec<String>>,
+    ) -> Result<Response<Body>, hyper::Error> {
+        let endpoint = {
+            let mut rng = rand::thread_rng();
+            endpoints.choose(&mut rng).unwrap()
+        };
+        let (mut parts, body) = request.into_parts();
+
+        let mut uri_parts = parts.uri.clone().into_parts();
+        uri_parts.authority = Some(endpoint.parse().unwrap());
+        uri_parts.scheme = Some("http".parse().unwrap());
+        parts.uri = Uri::from_parts(uri_parts).unwrap();
+
+        let request = Request::from_parts(parts, body);
+        let response = client.request(request).await;
+        response
+    }
+
+    async fn shutdown(channel: oneshot::Receiver<()>) {
+        channel.await.unwrap_or(());
+    }
+
+    pub fn new(address: &SocketAddr, endpoints: Vec<String>) -> Self {
+        let client = Arc::new(Client::builder().http2_only(true).build_http());
+        let endpoints = Arc::new(endpoints);
+        let (tx, rx) = oneshot::channel::<()>();
+
+        let make_service = make_service_fn(move |_| {
+            let client = client.clone();
+            let endpoints = endpoints.clone();
+
+            async move {
+                Ok::<_, hyper::Error>(service_fn(move |req| {
+                    Self::route(req, client.clone(), endpoints.clone())
+                }))
+            }
+        });
+
+        let server = Server::bind(address).serve(make_service);
+        let graceful = server.with_graceful_shutdown(Self::shutdown(rx));
+
+        let server_handle = tokio::spawn(async move { graceful.await });
+
+        Self {
+            server_handle: Some(server_handle),
+            stop_channel: Some(tx),
+        }
+    }
+
+    pub async fn stop(&mut self) {
+        if let Some(stop_channel) = self.stop_channel.take() {
+            let _ = stop_channel.send(());
+        }
+        if let Some(server_handle) = self.server_handle.take() {
+            let _ = server_handle.await;
+        }
+    }
+}
diff --git a/fog/uri/src/lib.rs b/fog/uri/src/lib.rs
index b5634bc213..2052cd38b5 100644
--- a/fog/uri/src/lib.rs
+++ b/fog/uri/src/lib.rs
@@ -60,6 +60,20 @@ impl UriScheme for FogLedgerScheme {
     const DEFAULT_INSECURE_PORT: u16 = 3223;
 }
 
+/// Key Image Store (for use with router) Uri Scheme
+#[derive(Debug, Hash, Ord, PartialOrd, Eq, PartialEq, Clone)]
+pub struct KeyImageStoreScheme {}
+
+impl UriScheme for KeyImageStoreScheme {
+    /// The part before the '://' of a URL.
+    const SCHEME_SECURE: &'static str = "key-image-store";
+    const SCHEME_INSECURE: &'static str = "insecure-key-image-store";
+
+    /// Default port numbers
+    const DEFAULT_SECURE_PORT: u16 = 443;
+    const DEFAULT_INSECURE_PORT: u16 = 3223;
+}
+
 /// Fog Ingest Uri Scheme
 #[derive(Debug, Hash, Ord, PartialOrd, Eq, PartialEq, Clone)]
 pub struct FogIngestScheme {}
@@ -92,7 +106,11 @@ impl UriScheme for IngestPeerScheme {
 pub type FogIngestUri = Uri<FogIngestScheme>;
 /// Uri used when talking to fog-ledger service, with the right default ports
 /// and scheme.
+/// FogLedgerUri is also used for the router / client-facing part of the
+/// router & store system.
 pub type FogLedgerUri = Uri<FogLedgerScheme>;
+/// Uri for a Fog key image store, to be queried by a Key Image Router.
+pub type KeyImageStoreUri = Uri<KeyImageStoreScheme>;
 /// Uri used when talking to fog view router service.
 pub type FogViewRouterUri = Uri<FogViewRouterScheme>;
 /// Uri used when talking to fog view store service.
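Editorial note, not part of the patch: a minimal sketch of how the new scheme is consumed, using only the accessors that the unit-test hunk below also exercises. The host, port, and function name are placeholders; tools/fog-local-network additionally appends a "?sharding_strategy=..." query parameter to URIs of this scheme when launching a store.

    use std::str::FromStr;

    use mc_fog_uri::{ConnectionUri, KeyImageStoreUri};

    fn parse_store_uri_sketch() {
        // "insecure-key-image-store" selects the cleartext variant; the TLS
        // variant uses the "key-image-store" scheme.
        let uri = KeyImageStoreUri::from_str("insecure-key-image-store://localhost:3223/")
            .expect("URI should parse");

        assert_eq!(uri.addr(), "localhost:3223");
        assert!(!uri.use_tls());
        // The responder id (host:port by default) is what the store hands to its
        // enclave, as in the new store.rs test earlier in this diff.
        let _responder_id = uri.responder_id().expect("URI should yield a responder id");
    }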
@@ -161,6 +179,14 @@ mod tests {
         );
         assert!(!uri.use_tls());
 
+        let uri = KeyImageStoreUri::from_str(
+            "insecure-key-image-store://node1.test.mobilecoin.com:3223/",
+        )
+        .unwrap();
+        assert_eq!(uri.addr(), "node1.test.mobilecoin.com:3223");
+        assert_eq!(
+            uri.responder_id().unwrap(),
+            ResponderId::from_str("node1.test.mobilecoin.com:3223").unwrap()
+        );
+        assert!(!uri.use_tls());
+
         let uri = FogViewRouterUri::from_str(
             "insecure-fog-view-router://node1.test.mobilecoin.com:3225/",
         )
         .unwrap();
         assert_eq!(uri.addr(), "node1.test.mobilecoin.com:3225");
         assert_eq!(
             uri.responder_id().unwrap(),
             ResponderId::from_str("node1.test.mobilecoin.com:3225").unwrap()
@@ -271,6 +297,27 @@ mod tests {
             ResponderId::from_str("node1.test.mobilecoin.com:666").unwrap()
         );
         assert!(!uri.use_tls());
+
+        let uri = FogViewRouterUri::from_str(
+            "insecure-fog-view-router://node1.test.mobilecoin.com:3225/",
+        )
+        .unwrap();
+        assert_eq!(uri.addr(), "node1.test.mobilecoin.com:3225");
+        assert_eq!(
+            uri.responder_id().unwrap(),
+            ResponderId::from_str("node1.test.mobilecoin.com:3225").unwrap()
+        );
+        assert!(!uri.use_tls());
+
+        let uri =
+            FogViewStoreUri::from_str("insecure-fog-view-store://node1.test.mobilecoin.com:3225/")
+                .unwrap();
+        assert_eq!(uri.addr(), "node1.test.mobilecoin.com:3225");
+        assert_eq!(
+            uri.responder_id().unwrap(),
+            ResponderId::from_str("node1.test.mobilecoin.com:3225").unwrap()
+        );
+        assert!(!uri.use_tls());
     }
 
     #[test]
diff --git a/fog/view/server/src/router_admin_service.rs b/fog/view/server/src/router_admin_service.rs
index 1e3274842b..cd1a031f2f 100644
--- a/fog/view/server/src/router_admin_service.rs
+++ b/fog/view/server/src/router_admin_service.rs
@@ -8,7 +8,7 @@ use crate::{
 use grpcio::{ChannelBuilder, RpcContext, RpcStatus, UnarySink};
 use mc_common::logger::{log, Logger};
 use mc_fog_api::{
-    view::AddShardRequest,
+    fog_common::AddShardRequest,
     view_grpc::{FogViewRouterAdminApi, FogViewStoreApiClient},
 };
 use mc_fog_uri::FogViewStoreUri;
diff --git a/fog/view/server/tests/streaming_smoke_tests.rs b/fog/view/server/tests/streaming_smoke_tests.rs
index 5c11a5ed5a..14abd7468c 100644
--- a/fog/view/server/tests/streaming_smoke_tests.rs
+++ b/fog/view/server/tests/streaming_smoke_tests.rs
@@ -1,7 +1,7 @@
 // Copyright (c) 2018-2022 The MobileCoin Foundation
 
 use futures::executor::block_on;
-use mc_common::logger::{create_app_logger, o};
+use mc_common::logger;
 use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPublic};
 use mc_fog_kex_rng::KexRngPubkey;
 use mc_fog_recovery_db_iface::{RecoveryDb, ReportData, ReportDb};
@@ -18,7 +18,7 @@ large_omap_one_store = { 1048576, 1, 6 },
 large_omap_multiple_stores = { 1048576, 6, 1 },
 )]
 fn test_streaming_integration(omap_capacity: u64, store_count: usize, blocks_per_store: u64) {
-    let (logger, _global_logger_guard) = create_app_logger(o!());
+    let logger = logger::create_test_logger("overlapping_stores".to_string());
     let store_block_ranges =
         mc_fog_view_server_test_utils::create_block_ranges(store_count, blocks_per_store);
     let mut test_environment =
diff --git a/fog/view/server/tests/unary_smoke_tests.rs b/fog/view/server/tests/unary_smoke_tests.rs
index 4db2df1f66..58c6c6c91e 100644
--- a/fog/view/server/tests/unary_smoke_tests.rs
+++ b/fog/view/server/tests/unary_smoke_tests.rs
@@ -9,7 +9,7 @@
 // its way into the client.
use mc_blockchain_types::{Block, BlockID, BlockVersion}; -use mc_common::logger::{create_app_logger, o}; +use mc_common::logger; use mc_crypto_keys::{CompressedRistrettoPublic, RistrettoPublic}; use mc_fog_kex_rng::KexRngPubkey; use mc_fog_recovery_db_iface::{RecoveryDb, ReportData, ReportDb}; @@ -31,7 +31,7 @@ large_omap_one_store = { 1048576, 1, 6 }, large_omap_multiple_stores = { 1048576, 6, 1 }, )] fn test_view_integration(view_omap_capacity: u64, store_count: usize, blocks_per_store: u64) { - let (logger, _global_logger_guard) = create_app_logger(o!()); + let logger = logger::create_test_logger("overlapping_stores".to_string()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); let store_block_ranges = mc_fog_view_server_test_utils::create_block_ranges(store_count, blocks_per_store); @@ -509,7 +509,7 @@ one_store = { 1, 40 }, multiple_stores = { 5, 8 }, )] fn test_overlapping_ingest_ranges(store_count: usize, blocks_per_store: u64) { - let (logger, _global_logger_guard) = create_app_logger(o!()); + let logger = logger::create_test_logger("overlapping_stores".to_string()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); const VIEW_OMAP_CAPACITY: u64 = 512; let store_block_ranges = @@ -637,7 +637,7 @@ one_store = { 1, 40 }, multiple_stores = { 5, 8 }, )] fn test_start_with_missing_range(store_count: usize, blocks_per_store: u64) { - let (logger, _global_logger_guard) = create_app_logger(o!()); + let logger = logger::create_test_logger("overlapping_stores".to_string()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); const VIEW_OMAP_CAPACITY: u64 = 512; let store_block_ranges = @@ -699,7 +699,7 @@ one_store = { 1, 40 }, multiple_stores = { 5, 8 }, )] fn test_middle_missing_range_with_decommission(store_count: usize, blocks_per_store: u64) { - let (logger, _global_logger_guard) = create_app_logger(o!()); + let logger = logger::create_test_logger("overlapping_stores".to_string()); let mut rng: StdRng = SeedableRng::from_seed([123u8; 32]); const VIEW_OMAP_CAPACITY: u64 = 512; let store_block_ranges = diff --git a/tools/fog-local-network/fog_conformance_tests.py b/tools/fog-local-network/fog_conformance_tests.py index 93d860e88b..8b63e35718 100755 --- a/tools/fog-local-network/fog_conformance_tests.py +++ b/tools/fog-local-network/fog_conformance_tests.py @@ -392,7 +392,8 @@ def __init__(self, work_dir, args): self.fog_view_router = None # TODO: Add more fog view instances with sharding. 
self.fog_view_store = None - self.fog_ledger = None + self.fog_ledger_router = None + self.key_image_store = None self.fog_report = None self.multi_balance_checker = None @@ -499,17 +500,30 @@ def run(self, args): ) self.fog_view_router.start() - self.fog_ledger = FogLedger( - name = 'ledger_server1', + self.key_image_store = FogKeyImageStore( + name = 'keyimage1', + client_port = BASE_KEY_IMAGE_STORE_PORT, + admin_port = BASE_KEY_IMAGE_STORE_ADMIN_PORT, + admin_http_gateway_port = BASE_KEY_IMAGE_STORE_ADMIN_HTTP_GATEWAY_PORT, + release = self.release, + sharding_strategy = 'default', + ledger_db_path = ledger2.ledger_db_path, + watcher_db_path = ledger2.watcher_db_path, + ) + self.key_image_store.start() + + self.fog_ledger_router = FogLedgerRouter( + name = 'ledger1', ledger_db_path = ledger2.ledger_db_path, client_responder_id = f'localhost:{BASE_NGINX_CLIENT_PORT}', client_port = BASE_LEDGER_CLIENT_PORT, admin_port = BASE_LEDGER_ADMIN_PORT, admin_http_gateway_port = BASE_LEDGER_ADMIN_HTTP_GATEWAY_PORT, watcher_db_path = ledger2.watcher_db_path, + shard_uris = [self.key_image_store.get_client_listen_uri()], release = self.release, ) - self.fog_ledger.start() + self.fog_ledger_router.start() self.fog_report = FogReport( name = 'report1', @@ -1029,8 +1043,11 @@ def stop(self): if self.multi_balance_checker: self.multi_balance_checker.stop() - if self.fog_ledger: - self.fog_ledger.stop() + if self.fog_ledger_router: + self.fog_ledger_router.stop() + + if self.key_image_store: + self.key_image_store.stop() if self.fog_report: self.fog_report.stop() diff --git a/tools/fog-local-network/fog_local_network.py b/tools/fog-local-network/fog_local_network.py index b698351790..0e1cf405e9 100644 --- a/tools/fog-local-network/fog_local_network.py +++ b/tools/fog-local-network/fog_local_network.py @@ -123,17 +123,30 @@ def start(self): ) self.fog_report.start() - self.fog_ledger = FogLedger( - 'ledger1', - self.nodes[0].ledger_dir, - f'localhost:{BASE_NGINX_CLIENT_PORT}', - BASE_LEDGER_CLIENT_PORT, - BASE_LEDGER_ADMIN_PORT, - BASE_LEDGER_ADMIN_HTTP_GATEWAY_PORT, - self.mobilecoind.watcher_db, - release=True, + self.key_image_store = FogKeyImageStore( + name = 'keyimage1', + client_port = BASE_KEY_IMAGE_STORE_PORT, + admin_port = BASE_KEY_IMAGE_STORE_ADMIN_PORT, + admin_http_gateway_port = BASE_KEY_IMAGE_STORE_ADMIN_HTTP_GATEWAY_PORT, + release = True, + sharding_strategy = 'default', + ledger_db_path = self.nodes[0].ledger_dir, + watcher_db_path = self.mobilecoind.watcher_db, + ) + self.key_image_store.start() + + self.fog_ledger_router = FogLedgerRouter( + name = 'ledger1', + ledger_db_path = self.nodes[0].ledger_dir, + client_responder_id = f'localhost:{BASE_NGINX_CLIENT_PORT}', + client_port = BASE_LEDGER_CLIENT_PORT, + admin_port = BASE_LEDGER_ADMIN_PORT, + admin_http_gateway_port = BASE_LEDGER_ADMIN_HTTP_GATEWAY_PORT, + watcher_db_path = self.mobilecoind.watcher_db, + shard_uris = [self.key_image_store.get_client_listen_uri()], + release = True, ) - self.fog_ledger.start() + self.fog_ledger_router.start() # Tell the ingest server to activate, giving it a little time for RPC to wakeup time.sleep(15) @@ -151,7 +164,8 @@ def stop_server(name): if server is not None: server.stop() - stop_server("fog_ledger") + stop_server("key_image_store") + stop_server("fog_ledger_router") stop_server("fog_report") stop_server("fog_view_store") stop_server("fog_view_router") diff --git a/tools/fog-local-network/local_fog.py b/tools/fog-local-network/local_fog.py index 2b559ca2ed..1b43777ad4 100644 --- 
a/tools/fog-local-network/local_fog.py +++ b/tools/fog-local-network/local_fog.py @@ -25,6 +25,9 @@ BASE_LEDGER_CLIENT_PORT = 7200 BASE_LEDGER_ADMIN_PORT = 7400 BASE_LEDGER_ADMIN_HTTP_GATEWAY_PORT = 7500 +BASE_KEY_IMAGE_STORE_PORT = 7600 +BASE_KEY_IMAGE_STORE_ADMIN_PORT = 7700 +BASE_KEY_IMAGE_STORE_ADMIN_HTTP_GATEWAY_PORT = 7800 BASE_NGINX_CLIENT_PORT = 8200 @@ -329,8 +332,8 @@ def stop(self): self.admin_http_gateway_process = None -class FogLedger: - def __init__(self, name, ledger_db_path, client_responder_id, client_port, admin_port, admin_http_gateway_port, watcher_db_path, release): +class FogLedgerRouter: + def __init__(self, name, ledger_db_path, client_responder_id, client_port, admin_port, admin_http_gateway_port, watcher_db_path, shard_uris, release): self.name = name self.ledger_db_path = ledger_db_path self.watcher_db_path = watcher_db_path @@ -341,11 +344,13 @@ def __init__(self, name, ledger_db_path, client_responder_id, client_port, admin self.admin_port = admin_port self.admin_http_gateway_port = admin_http_gateway_port + + self.shard_uris = shard_uris self.release = release self.target_dir = os.path.join(PROJECT_DIR, target_dir(self.release)) - self.ledger_server_process = None + self.ledger_router_process = None self.admin_http_gateway_process = None def __repr__(self): @@ -356,26 +361,85 @@ def start(self): assert os.path.exists(os.path.join(self.watcher_db_path, 'data.mdb')), self.watcher_db_path self.stop() - print(f'Starting fog ledger {self.name}') + print(f'Starting fog ledger router {self.name}') cmd = ' '.join([ - f'exec {self.target_dir}/ledger_server', + f'exec {self.target_dir}/ledger_router', f'--ledger-db={self.ledger_db_path}', f'--client-listen-uri={self.client_listen_url}', f'--client-responder-id={self.client_responder_id}', f'--ias-api-key={IAS_API_KEY}', f'--ias-spid={IAS_SPID}', + f'--shard-uris={",".join(self.shard_uris)}', f'--admin-listen-uri=insecure-mca://{LISTEN_HOST}:{self.admin_port}/', f'--watcher-db {self.watcher_db_path}', ]) - self.ledger_server_process = log_and_popen_shell(cmd) + self.ledger_router_process = log_and_popen_shell(cmd) + + print(f'Starting admin http gateway for fog ledger router') + self.admin_http_gateway_process = start_admin_http_gateway(self.admin_http_gateway_port, self.admin_port, self.target_dir) + + def stop(self): + if self.ledger_router_process and self.ledger_router_process.poll() is None: + self.ledger_router_process.terminate() + self.ledger_router_process = None + + if self.admin_http_gateway_process and self.admin_http_gateway_process.poll() is None: + self.admin_http_gateway_process.terminate() + self.admin_http_gateway_process = None + + +class FogKeyImageStore: + def __init__(self, name, client_port, admin_port, admin_http_gateway_port, release, sharding_strategy, ledger_db_path, watcher_db_path): + self.name = name + + self.client_port = client_port + self.client_responder_id = f'{LISTEN_HOST}:{self.client_port}' + self.sharding_strategy = sharding_strategy + self.client_listen_url = f'insecure-key-image-store://{LISTEN_HOST}:{self.client_port}/?sharding_strategy={self.sharding_strategy}' + self.sharding_strategy = sharding_strategy + self.ledger_db_path = ledger_db_path + self.watcher_db_path = watcher_db_path + + self.admin_port = admin_port + self.admin_http_gateway_port = admin_http_gateway_port + + self.release = release + self.target_dir = os.path.join(PROJECT_DIR, target_dir(self.release)) + + self.key_image_store_process = None + self.admin_http_gateway_process = None + + def __repr__(self): + 
return self.name + + def get_client_listen_uri(self): + return self.client_listen_url + + def start(self): + self.stop() + + print(f'Starting key image store {self.name}') + cmd = ' '.join([ + DATABASE_URL_ENV, + f'exec {self.target_dir}/key_image_store', + f'--client-listen-uri={self.client_listen_url}', + f'--client-responder-id={self.client_responder_id}', + f'--sharding-strategy={self.sharding_strategy}', + f'--ledger-db={self.ledger_db_path}', + f'--watcher-db={self.watcher_db_path}', + f'--ias-api-key={IAS_API_KEY}', + f'--ias-spid={IAS_SPID}', + f'--admin-listen-uri=insecure-mca://{LISTEN_HOST}:{self.admin_port}/', + ]) + self.key_image_store_process = log_and_popen_shell(cmd) - print(f'Starting admin http gateway for fog ledger') + print(f'Starting admin http gateway for key image store') self.admin_http_gateway_process = start_admin_http_gateway(self.admin_http_gateway_port, self.admin_port, self.target_dir) def stop(self): - if self.ledger_server_process and self.ledger_server_process.poll() is None: - self.ledger_server_process.terminate() - self.ledger_server_process = None + if self.key_image_store_process and self.key_image_store_process.poll() is None: + self.key_image_store_process.terminate() + self.key_image_store_process = None if self.admin_http_gateway_process and self.admin_http_gateway_process.poll() is None: self.admin_http_gateway_process.terminate()
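Editorial closing note, not part of the patch: in these local-network scripts the ledger router reaches its key image store directly via --shard-uris, while the new ShardProxyServer helper added to fog/ledger/test_infra in this diff emulates a load balancer over several store replicas, forwarding each request to a randomly chosen endpoint over cleartext HTTP/2. A minimal usage sketch follows; the crate path, ports, and addresses are assumptions.

    use std::net::SocketAddr;

    use mc_fog_ledger_test_infra::ShardProxyServer;

    #[tokio::main]
    async fn main() {
        // The proxy listens on `listen` and fans requests out over `stores`,
        // picking one store at random per request.
        let listen: SocketAddr = "127.0.0.1:7650".parse().expect("valid socket address");
        let stores = vec!["127.0.0.1:7600".to_string(), "127.0.0.1:7601".to_string()];

        let mut proxy = ShardProxyServer::new(&listen, stores);

        // ... point a key image store gRPC client at 127.0.0.1:7650 here ...

        proxy.stop().await;
    }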