From f56abf4b6302905d50334c4d17786a4cb7546106 Mon Sep 17 00:00:00 2001
From: armyhaylenko
Date: Thu, 16 Jan 2025 19:38:31 +0200
Subject: [PATCH 01/33] chore(api): optimize retrieval of assets

---
 Cargo.lock                           |   3 +-
 rocks-db/Cargo.toml                  |   1 +
 rocks-db/src/clients/asset_client.rs | 158 +++++++++++++++------------
 rocks-db/src/column.rs               |   3 +
 4 files changed, 97 insertions(+), 68 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 3019d3c7..a689a53e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 3
+version = 4

 [[package]]
 name = "Inflector"
@@ -6510,6 +6510,7 @@ dependencies = [
  "entities",
  "figment",
  "flatbuffers 24.3.25",
+ "futures",
  "futures-util",
  "hex",
  "indicatif",
diff --git a/rocks-db/Cargo.toml b/rocks-db/Cargo.toml
index 4555a54a..e0f7882b 100644
--- a/rocks-db/Cargo.toml
+++ b/rocks-db/Cargo.toml
@@ -16,6 +16,7 @@ figment = { workspace = true }
 lz4 = { workspace = true }
 tar = { workspace = true }
 reqwest = { workspace = true }
+futures = { workspace = true }
 futures-util = { workspace = true }
 metrics-utils = { path = "../metrics_utils" }
 tokio = { workspace = true }
diff --git a/rocks-db/src/clients/asset_client.rs b/rocks-db/src/clients/asset_client.rs
index d639134d..11d893a2 100644
--- a/rocks-db/src/clients/asset_client.rs
+++ b/rocks-db/src/clients/asset_client.rs
@@ -6,7 +6,7 @@ use entities::{
     enums::{AssetType, SpecificationAssetClass, TokenMetadataEdition},
     models::{EditionData, PubkeyWithSlot},
 };
-use futures_util::FutureExt;
+use futures::future::Either;
 use solana_sdk::pubkey::Pubkey;

 use crate::{
@@ -148,65 +148,95 @@ impl Storage {
         owner_address: &Option<Pubkey>,
         options: &Options,
     ) -> Result {
-        let assets_leaf_fut = self.asset_leaf_data.batch_get(asset_ids.clone());
         let token_accounts_fut = if let Some(owner_address) = owner_address {
-            self.get_raw_token_accounts(Some(*owner_address), None, None, None, None, None, true)
-                .boxed()
+            Either::Left(self.get_raw_token_accounts(
+                Some(*owner_address),
+                None,
+                None,
+                None,
+                None,
+                None,
+                true,
+            ))
         } else {
-            async { Ok(Vec::new()) }.boxed()
+            Either::Right(async { Ok(Vec::new()) })
         };
-        let spl_mints_fut = self.spl_mints.batch_get(asset_ids.clone());
         let inscriptions_fut = if options.show_inscription {
-            self.inscriptions.batch_get(asset_ids.clone()).boxed()
+            Either::Left(self.inscriptions.batch_get(asset_ids.clone()))
         } else {
-            async { Ok(Vec::new()) }.boxed()
+            Either::Right(async { Ok(Vec::new()) })
         };
-        let (mut assets_data, assets_collection_pks, mut urls) =
-            self.get_assets_with_collections_and_urls(asset_ids.clone()).await?;
-        let mut mpl_core_collections = HashMap::new();
-        // todo: consider async/future here, but not likely as the very next call depends on urls from this one
-        if !assets_collection_pks.is_empty() {
+
+        let (assets_leaf, assets_with_collections_and_urls, token_accounts, spl_mints, inscriptions) = tokio::join!(
+            self.asset_leaf_data.batch_get(asset_ids.clone()),
+            self.get_assets_with_collections_and_urls(asset_ids.clone()),
+            token_accounts_fut,
+            self.spl_mints.batch_get(asset_ids.clone()),
+            inscriptions_fut,
+        );
+
+        let (mut assets_data, assets_collection_pks, mut urls) = assets_with_collections_and_urls?;
+
+        let offchain_data_fut =
+            self.asset_offchain_data.batch_get(urls.clone().into_values().collect::<Vec<_>>());
+        let asset_collection_data_fut = if assets_collection_pks.is_empty() {
+            Either::Left(async { Ok(Vec::new()) })
+        } else {
            let assets_collection_pks =
assets_collection_pks.into_iter().collect::<Vec<_>>();
            let start_time = chrono::Utc::now();
-            let collection_d = self.db.batched_multi_get_cf(
-                &self.asset_data.handle(),
-                assets_collection_pks.clone(),
-                false,
-            );
-            for asset in collection_d {
-                let asset = asset?;
-                if let Some(asset) = asset {
-                    let asset = fb::root_as_asset_complete_details(asset.as_ref())
-                        .map_err(|e| StorageError::Common(e.to_string()))?;
-                    let key =
-                        Pubkey::new_from_array(asset.pubkey().unwrap().bytes().try_into().unwrap());
-                    if options.show_collection_metadata {
-                        asset
-                            .dynamic_details()
-                            .and_then(|d| d.url())
-                            .and_then(|u| u.value())
-                            .map(|u| urls.insert(key, u.to_string()));
-                        assets_data.insert(key, asset.into());
-                    }
-                    if let Some(collection) = asset.collection() {
-                        mpl_core_collections.insert(key, AssetCollection::from(collection));
-                    }
+            let red_metrics = self.red_metrics.clone();
+            let db = self.db.clone();
+            Either::Right(async move {
+                tokio::task::spawn_blocking(move || {
+                    let collection_d = db.batched_multi_get_cf(
+                        &db.cf_handle(AssetCompleteDetails::NAME).unwrap(),
+                        assets_collection_pks,
+                        false,
+                    );
+                    red_metrics.observe_request(
+                        ROCKS_COMPONENT,
+                        BATCH_GET_ACTION,
+                        "get_asset_collection",
+                        start_time,
+                    );
+                    // since we cannot return referenced data from this closure,
+                    // we need to convert the db slice to an owned value (Vec in this case).
+                    collection_d
+                        .into_iter()
+                        .map(|res_opt| res_opt.map(|opt| opt.map(|slice| slice.as_ref().to_vec())))
+                        .collect()
+                })
+                .await
+                .map_err(|e| StorageError::Common(e.to_string()))
+            })
+        };
+
+        let (offchain_data, asset_collection_data) =
+            tokio::join!(offchain_data_fut, asset_collection_data_fut);
+
+        let mut mpl_core_collections = HashMap::new();
+        for asset in asset_collection_data? {
+            let asset = asset?;
+            if let Some(asset) = asset {
+                let asset = fb::root_as_asset_complete_details(asset.as_ref())
+                    .map_err(|e| StorageError::Common(e.to_string()))?;
+                let key =
+                    Pubkey::new_from_array(asset.pubkey().unwrap().bytes().try_into().unwrap());
+                if options.show_collection_metadata {
+                    asset
+                        .dynamic_details()
+                        .and_then(|d| d.url())
+                        .and_then(|u| u.value())
+                        .map(|u| urls.insert(key, u.to_string()));
+                    assets_data.insert(key, asset.into());
+                }
+                if let Some(collection) = asset.collection() {
+                    mpl_core_collections.insert(key, AssetCollection::from(collection));
                 }
             }
-            self.red_metrics.observe_request(
-                ROCKS_COMPONENT,
-                BATCH_GET_ACTION,
-                "get_asset_collection",
-                start_time,
-            );
         }
-        let offchain_data_fut =
-            self.asset_offchain_data.batch_get(urls.clone().into_values().collect::<Vec<_>>());
-
-        let (assets_leaf, offchain_data, token_accounts, spl_mints) =
-            tokio::join!(assets_leaf_fut, offchain_data_fut, token_accounts_fut, spl_mints_fut);
         let offchain_data = offchain_data
             .map_err(|e| StorageError::Common(e.to_string()))?
             .into_iter()
@@ -223,27 +253,21 @@ impl Storage {
             })
             .collect::<HashMap<_, _>>();

-        let (inscriptions, inscriptions_data) = if options.show_inscription {
-            let inscriptions = inscriptions_fut
+        let inscriptions = inscriptions
+            .map_err(|e| StorageError::Common(e.to_string()))?
+            .into_iter()
+            .filter_map(|asset| asset.map(|a| (a.root, a)))
+            .collect::<HashMap<_, _>>();
+        let inscriptions_data = to_map!(
+            self.inscription_data
+                .batch_get(
+                    inscriptions
+                        .values()
+                        .map(|inscription| inscription.inscription_data_account)
+                        .collect(),
+                )
                 .await
-                .map_err(|e| StorageError::Common(e.to_string()))?
- .into_iter() - .filter_map(|asset| asset.map(|a| (a.root, a))) - .collect::>(); - let inscriptions_data = to_map!( - self.inscription_data - .batch_get( - inscriptions - .values() - .map(|inscription| inscription.inscription_data_account) - .collect(), - ) - .await - ); - (inscriptions, inscriptions_data) - } else { - (HashMap::new(), HashMap::new()) - }; + ); let token_accounts = token_accounts.map_err(|e| StorageError::Common(e.to_string()))?; let spl_mints = to_map!(spl_mints); diff --git a/rocks-db/src/column.rs b/rocks-db/src/column.rs index 920b768b..e94a1b4a 100644 --- a/rocks-db/src/column.rs +++ b/rocks-db/src/column.rs @@ -204,6 +204,9 @@ where } pub async fn batch_get(&self, keys: Vec) -> Result>> { + if keys.is_empty() { + return Ok(Vec::new()); + } let start_time = chrono::Utc::now(); let db = self.backend.clone(); let keys = keys.clone(); From 0c5eaf200f8e48d41c4001f92a6c0f9119542247 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Sat, 18 Jan 2025 16:26:29 +0100 Subject: [PATCH 02/33] MTG-1223 Fix/Improve Clap implementation - Allow disabling parameters (api, rocks_migration, backfiller) via Clap CLI when their default value is True - Improve parameter names for clarity - Add additional system logging - Replace old .env example --- .env.example | 216 +++++++++++----------- .env.example_new | 132 ------------- .env.example_old | 129 +++++++++++++ Cargo.lock | 32 ++-- Cargo.toml | 4 +- nft_ingester/src/bin/api/main.rs | 2 +- nft_ingester/src/bin/ingester/main.rs | 39 ++-- nft_ingester/src/bin/ingester/readme.md | 6 +- nft_ingester/src/bin/synchronizer/main.rs | 2 +- nft_ingester/src/config.rs | 94 +++++----- 10 files changed, 335 insertions(+), 321 deletions(-) delete mode 100644 .env.example_new create mode 100644 .env.example_old diff --git a/.env.example b/.env.example index b1b5dcb9..a47c8562 100644 --- a/.env.example +++ b/.env.example @@ -1,122 +1,55 @@ -# Required by Postgre container -POSTGRE_DB_PATH="postgre/db/path" - -RUST_BACKTRACE=1 -# Ingester instance config -INGESTER_LOG_LEVEL=info - -INGESTER_DATABASE_CONFIG='{max_postgres_connections=10, url="postgres://user:pass@0.0.0.0:5432/database"}' -INGESTER_TCP_CONFIG='{receiver_addr="localhost:2000", receiver_reconnect_interval=5, snapshot_receiver_addr="localhost:5000"}' -INGESTER_REDIS_MESSENGER_CONFIG='{messenger_type="Redis", connection_config={redis_connection_str="redis://:pass@localhost:6379"}}' -INGESTER_MESSAGE_SOURCE=Redis #TCP or Redis - -INGESTER_ACCOUNTS_BUFFER_SIZE=250 -INGESTER_ACCOUNTS_PARSING_WORKERS=20 -INGESTER_TRANSACTIONS_PARSING_WORKERS=20 - -INGESTER_SNAPSHOT_PARSING_WORKERS=1 -INGESTER_SNAPSHOT_PARSING_BATCH_SIZE=250 - -INGESTER_GAPFILLER_PEER_ADDR="0.0.0.0" -INGESTER_METRICS_PORT=9091 -INGESTER_SERVER_PORT=9092 -INGESTER_PEER_GRPC_PORT=9099 - -INGESTER_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data" -INGESTER_ROCKS_DB_PATH="path/to/rocks/on/disk" - -INGESTER_ARCHIVES_DIR="path/to/rocks/backup/archives" -INGESTER_ROCKS_BACKUP_ARCHIVES_DIR="path/to/rocks/backup/archives" -INGESTER_ROCKS_BACKUP_DIR="path/to/rocks/backup/" - -INGESTER_BACKFILL_RPC_ADDRESS='https://rpc:port' -INGESTER_RPC_HOST='https://rpc:port' - -INGESTER_BACKFILLER_SOURCE_MODE=RPC #RPC or Bigtable -INGESTER_BIG_TABLE_CONFIG='{creds="/usr/src/app/creds.json", timeout=1000}' - -INGESTER_RUN_SEQUENCE_CONSISTENT_CHECKER=false # experimental, enable only for testing purposes -# Optional, required only if it needs to run fork cleaner, default is false. 
Unstable as it removes forked items, but also removes some valid leafs. Recommended to use only! for testing purposes.
-INGESTER_RUN_FORK_CLEANER=false
-INGESTER_RUN_BUBBLEGUM_BACKFILLER=true
-
-INGESTER_BACKFILLER_MODE=PersistAndIngest # The only available option, the variable will be removed
-INGESTER_SLOT_UNTIL=0
-INGESTER_SLOT_START_FROM=0
-INGESTER_WORKERS_COUNT=100
-INGESTER_CHUNK_SIZE=20
-INGESTER_PERMITTED_TASKS=1
-INGESTER_WAIT_PERIOD_SEC=30
-INGESTER_SHOULD_REINGEST=false
-
-INGESTER_PEER_GRPC_MAX_GAP_SLOTS=1000000
-
-INGESTER_RUN_PROFILING=false
-INGESTER_PROFILING_FILE_PATH_CONTAINER="/usr/src/profiling"
-INGESTER_PROFILING_FILE_PATH="/path/to/profiling"
+RPC_HOST='https://mainnet-aura.metaplex.com/{personal_rpc_key}'

-INGESTER_FILE_STORAGE_PATH_CONTAINER="/usr/src/app/file_storage"
-INGESTER_FILE_STORAGE_PATH="path/to/file/storage"
-INGESTER_MIGRATION_STORAGE_PATH=/path/to/migration_storage # requires explanation
+#Postgres
+PG_DATABASE_URL='postgres://solana:solana@localhost:5432/aura_db'

-INGESTER_ROCKS_FLUSH_BEFORE_BACKUP=false
-INGESTER_ROCKS_INTERVAL_IN_SECONDS=3600
-INGESTER_ROCKS_SYNC_INTERVAL_SECONDS=2
+#Redis
+REDIS_CONNECTION_CONFIG='{"redis_connection_str":"redis://127.0.0.1:6379/0"}'

-INGESTER_SYNCHRONIZER_DUMP_PATH="/path/to/dump"
-INGESTER_DISABLE_SYNCHRONIZER=true
-INGESTER_SKIP_CHECK_TREE_GAPS=true
+#RocksDB
+ROCKS_DB_PATH="/usr/src/rocksdb-data"
+ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data"
+ROCKS_DB_SECONDARY_PATH_CONTAINER="path/to/rocks/secondary/db"
 # path to the slots data, required for the backfiller to work
-INGESTER_SLOTS_DB_PATH=/path/to/slots-data
-INGESTER_SECONDARY_SLOTS_DB_PATH=/path/to/secondary/ingester-slots # should be removed
+ROCKS_SLOTS_DB_PATH=/path/to/slots-data
+ROCKS_SECONDARY_SLOTS_DB_PATH=/path/to/secondary/ingester-slots
+ROCKS_ARCHIVES_DIR="path/to/rocks/backup/archives"
+ROCKS_BACKUP_ARCHIVES_DIR="path/to/rocks/backup/archives"
+ROCKS_MIGRATION_STORAGE_PATH=/path/to/migration_storage
-
-# a common log level for all instances, will be overridden by specific log levels, requires refactoring
-RUST_LOG=info
-# API instance config
-API_LOG_LEVEL=info
+#Backfiller
+BACKFILLER_SOURCE_MODE=bigtable
+BIG_TABLE_CONFIG='{creds="/usr/src/app/creds.json", timeout=1000}'

-API_DATABASE_CONFIG='{max_postgres_connections=250, url="postgres://user:pass@0.0.0.0:5432/database"}'
-
-API_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data"
-API_ROCKS_DB_SECONDARY_PATH_CONTAINER="path/to/rocks/secondary/db"
-API_ARCHIVES_DIR="path/to/rocks/backup/archives"
-
-API_PEER_GRPC_PORT=8991
+# Metrics port. Starts an HTTP server to report metrics if the port is set.
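+# A usage sketch (the `/metrics` path is an assumption, not taken from this repo):
+#   curl http://localhost:8985/metrics   # scrape the API instance's metrics
+# Leaving a *_METRICS_PORT unset keeps that service's metrics server disabled.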
API_METRICS_PORT=8985
INGESTER_METRICS_PORT=9091
MIGRATOR_METRICS_PORT=5091
SYNCHRONIZER_METRICS_PORT=6091

# API server port (if API is enabled)
INGESTER_SERVER_PORT=9092
API_SERVER_PORT=8990

# GRPC Server port
PEER_GRPC_PORT=9099

# Docker
# Required by Postgre container
POSTGRE_DB_PATH="postgre/db/path"
ROCKS_BACKUP_DIR="path/to/rocks/backup/"
FILE_STORAGE_PATH="path/to/file/storage"
FILE_STORAGE_PATH_CONTAINER="/usr/src/app/file_storage"
ROCKS_DUMP_PATH="/path/to/dump"

#Profiling (optional)
PROFILING_FILE_PATH_CONTAINER="/usr/src/profiling"
PROFILING_FILE_PATH="/path/to/profiling"

# DEV configuration
RUST_BACKTRACE=1
# warn|info|debug
LOG_LEVEL=info

# Profiling config
# Optional, required only if it needs to run memory profiling
MALLOC_CONF="prof:true,prof_leak:true,prof_final:true,prof_active:true,prof_prefix:/usr/src/app/heaps/,lg_prof_interval:32,lg_prof_sample:19"

# Integrity verification
INTEGRITY_VERIFICATION_TEST_FILE_PATH="./test_keys/test_keys.txt"
INTEGRITY_VERIFICATION_TEST_FILE_PATH_CONTAINER="/test_keys/test_keys.txt"
INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH="./slots_collect"
-INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH_CONTAINER="/slots_collect" \ No newline at end of file
+INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH_CONTAINER="/slots_collect"
+
+#Configurable app parts that could be enabled or disabled (the values shown are the defaults).
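+# The same switches can also be flipped from the CLI, e.g. (per the ingester
+# readme updated later in this patch):
+#   ./target/debug/ingester --disable-api --disable-backfiller --disable-rocks-migration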
+RUN_API=true
+RUN_BACKFILLER=true
+RUN_BUBBLEGUM_BACKFILLER=true
+RUN_GAPFILLER=false
+SHOULD_REINGEST=false
+RUN_PROFILING=false
+RESTORE_ROCKS_DB=false
+ENABLE_ROCKS_MIGRATION=true
+CHECK_PROOFS=false
+SKIP_CHECK_TREE_GAPS=false
+
+#Changes (todo Remove after review/migration)
+
# API_RPC_HOST INGESTER_RPC_HOST -> RPC_HOST
#INGESTER_ROCKS_DB_PATH -> ROCKS_DB_PATH
#INGESTER_ROCKS_DB_PATH_CONTAINER -> ROCKS_DB_PATH_CONTAINER
#INGESTER_SYNCHRONIZER_DUMP_PATH -> ROCKS_DUMP_PATH
#API_ROCKS_DB_PATH_CONTAINER -> ROCKS_DB_PATH_CONTAINER
#INGESTER_FILE_STORAGE_PATH -> FILE_STORAGE_PATH
#INGESTER_FILE_STORAGE_PATH_CONTAINER -> FILE_STORAGE_PATH_CONTAINER
# INGESTER_PROFILING_FILE_PATH -> PROFILING_FILE_PATH
# INGESTER_PROFILING_FILE_PATH_CONTAINER -> PROFILING_FILE_PATH_CONTAINER
# INGESTER_MIGRATION_STORAGE_PATH -> ROCKS_MIGRATION_STORAGE_PATH
#
# INGESTER_ROCKS_BACKUP_ARCHIVES_DIR -> ROCKS_BACKUP_ARCHIVES_DIR
# INGESTER_ROCKS_BACKUP_DIR -> ROCKS_BACKUP_DIR
#
#SYNCHRONIZER_DUMP_PATH -> ROCKS_DUMP_PATH
#INGESTER_SLOTS_DB_PATH -> ROCKS_SLOTS_DB_PATH
#INGESTER_SECONDARY_SLOTS_DB_PATH -> ROCKS_SECONDARY_SLOTS_DB_PATH

#API_DATABASE_CONFIG -> PG_MAX_DB_CONNECTIONS and PG_DATABASE_URL

#SYNCHRONIZER_DUMP_PATH -> ROCKS_DUMP_PATH
#SYNCHRONIZER_ROCKS_DB_SECONDARY_PATH_CONTAINER -> ROCKS_DB_SECONDARY_PATH_CONTAINER
#SYNCHRONIZER_DUMP_SYNCHRONIZER_BATCH_SIZE -> DUMP_SYNCHRONIZER_BATCH_SIZE
#SYNCHRONIZER_DUMP_SYNC_THRESHOLD -> DUMP_SYNC_THRESHOLD

#API_ARCHIVES_DIR -> rocks_archives_dir
#API_ROCKS_DB_PATH_CONTAINER -> ROCKS_DB_PATH_CONTAINER
#API_ROCKS_DB_SECONDARY_PATH_CONTAINER -> ROCKS_DB_SECONDARY_PATH_CONTAINER
#API_FILE_STORAGE_PATH_CONTAINER -> FILE_STORAGE_PATH_CONTAINER
#API_JSON_MIDDLEWARE_CONFIG -> JSON_MIDDLEWARE_CONFIG
#API_CONSISTENCE_SYNCHRONIZATION_API_THRESHOLD -> CONSISTENCE_SYNCHRONIZATION_API_THRESHOLD
#API_CONSISTENCE_BACKFILLING_SLOTS_THRESHOLD -> CONSISTENCE_BACKFILLING_SLOTS_THRESHOLD
#
#API_SKIP_CHECK_TREE_GAPS -> SKIP_CHECK_TREE_GAPS
# INGESTER_REDIS_MESSENGER_CONFIG -> REDIS_CONNECTION_CONFIG and note the difference (less complex type)
# INGESTER_BACKFILLER_SOURCE_MODE -> BACKFILLER_SOURCE_MODE
# INGESTER_BIG_TABLE_CONFIG -> BIG_TABLE_CONFIG
# both API_PEER_GRPC_PORT and INGESTER_PEER_GRPC_PORT were replaced with a single PEER_GRPC_PORT - only one will actually work
# API_PEER_GRPC_MAX_GAP_SLOTS and INGESTER_PEER_GRPC_MAX_GAP_SLOTS -> PEER_GRPC_MAX_GAP_SLOTS (optional)
# INGESTER_ROCKS_SYNC_INTERVAL_SECONDS and API_ROCKS_SYNC_INTERVAL_SECONDS -> ROCKS_SYNC_INTERVAL_SECONDS (optional)
# INGESTER_GAPFILLER_PEER_ADDR -> GAPFILLER_PEER_ADDR (optional, only if run_gapfiller is set)
# INGESTER_ACCOUNTS_BUFFER_SIZE -> ACCOUNT_PROCESSOR_BUFFER_SIZE (has default value, may be skipped)
# INGESTER_ACCOUNTS_PARSING_WORKERS -> REDIS_ACCOUNTS_PARSING_WORKERS
# INGESTER_ROCKS_FLUSH_BEFORE_BACKUP -> ROCKS_FLUSH_BEFORE_BACKUP
# INGESTER_ROCKS_INTERVAL_IN_SECONDS -> ROCKS_INTERVAL_IN_SECONDS
# INGESTER_TRANSACTIONS_PARSING_WORKERS -> REDIS_TRANSACTIONS_PARSING_WORKERS
# INGESTER_SHOULD_REINGEST -> SHOULD_REINGEST
# INGESTER_RUN_SEQUENCE_CONSISTENT_CHECKER -> RUN_SEQUENCE_CONSISTENT_CHECKER
# INGESTER_RUN_PROFILING -> RUN_PROFILING
# INGESTER_RUN_BUBBLEGUM_BACKFILLER -> RUN_BUBBLEGUM_BACKFILLER
# Removed:
# INGESTER_BACKFILL_RPC_ADDRESS, INGESTER_BACKFILLER_MODE, INGESTER_MESSAGE_SOURCE (redis is used as the only option as of now), INGESTER_DISABLE_SYNCHRONIZER (synchronizer is no longer part of the ingester),
INGESTER_CHUNK_SIZE, INGESTER_PERMITTED_TASKS, INGESTER_TCP_CONFIG, INGESTER_WORKERS_COUNT, INGESTER_WAIT_PERIOD_SEC, INGESTER_SNAPSHOT_PARSING_WORKERS, INGESTER_SNAPSHOT_PARSING_BATCH_SIZE, INGESTER_SLOT_UNTIL, INGESTER_SLOT_START_FROM, INGESTER_RUN_FORK_CLEANER, INGESTER_RUN_DUMP_SYNCHRONIZE_ON_START diff --git a/.env.example_new b/.env.example_new deleted file mode 100644 index 37074bdb..00000000 --- a/.env.example_new +++ /dev/null @@ -1,132 +0,0 @@ -RPC_HOST='https://mainnet-aura.metaplex.com/{personal_rpc_key}' - -#Postgres -PG_DATABASE_URL='postgres://solana:solana@localhost:5432/aura_db' - -#Redis -REDIS_CONNECTION_CONFIG='{"redis_connection_str":"redis://127.0.0.1:6379/0"}' - -#RocksDB -ROCKS_DB_PATH="/usr/src/rocksdb-data" -ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data" -ROCKS_DB_SECONDARY_PATH_CONTAINER="path/to/rocks/secondary/db" -# path to the slots data, required for the backfiller to work -ROCKS_SLOTS_DB_PATH=/path/to/slots-data -ROCKS_SECONDARY_SLOTS_DB_PATH=/path/to/secondary/ingester-slots -ROCKS_ARCHIVES_DIR="path/to/rocks/backup/archives" -ROCKS_BACKUP_ARCHIVES_DIR="path/to/rocks/backup/archives" -ROCKS_MIGRATION_STORAGE_PATH=/path/to/migration_storage - - -#Backfiller -BACKFILLER_SOURCE_MODE=bigtable -BIG_TABLE_CONFIG='{creds="/usr/src/app/creds.json", timeout=1000}' - -# Metrics port. Start HTTP server to report metrics if port exist. -API_METRICS_PORT=8985 -INGESTER_METRICS_PORT=9091 -MIGRATOR_METRICS_PORT=5091 -SYNCHRONIZER_METRICS_PORT=6091 - -# API server port (if API is enabled) -INGESTER_SERVER_PORT=9092 -API_SERVER_PORT=8990 - -# GRPC Server port -PEER_GRPC_PORT=9099 - -# Docker -# Required by Postgre container -POSTGRE_DB_PATH="postgre/db/path" -ROCKS_DB_PATH="path/to/rocks/on/disk" -ROCKS_BACKUP_DIR="path/to/rocks/backup/" -FILE_STORAGE_PATH="path/to/file/storage" -FILE_STORAGE_PATH_CONTAINER="/usr/src/app/file_storage" -ROCKS_DUMP_PATH="/path/to/dump" - - -#Profiling (optional) -PROFILING_FILE_PATH_CONTAINER="/usr/src/profiling" -PROFILING_FILE_PATH="/path/to/profiling" - -# DEV configuration -RUST_BACKTRACE=1 -# warn|info|debug -LOG_LEVEL=info - -# Profiling config -# Optional, required only if it needs to run memory profiling -MALLOC_CONF="prof:true,prof_leak:true,prof_final:true,prof_active:true,prof_prefix:/usr/src/app/heaps/,lg_prof_interval:32,lg_prof_sample:19" - -# Integrity verification -INTEGRITY_VERIFICATION_TEST_FILE_PATH="./test_keys/test_keys.txt" -INTEGRITY_VERIFICATION_TEST_FILE_PATH_CONTAINER="/test_keys/test_keys.txt" -INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH="./slots_collect" -INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH_CONTAINER="/slots_collect" - -#Configurable app parts that cold be enabled or disabled. 
(values in the default positions) -IS_RUN_API=true -IS_RUN_BACKFILLER=true -IS_RUN_BUBBLEGUM_BACKFILLER=true -IS_RUN_GAPFILLER=false -SHOULD_REINGEST=false -IS_RUN_PROFILING=false -IS_RESTORE_ROCKS_DB=false -IS_ENABLE_ROCKS_MIGRATION=true -CHECK_PROOFS=false -SKIP_CHECK_TREE_GAPS=false - -#Changes (todo Remove after review/migration) - -# API_RPC_HOST INGESTER_RPC_HOST -> RPC_HOST -#INGESTER_ROCKS_DB_PATH -> ROCKS_DB_PATH -#INGESTER_ROCKS_DB_PATH_CONTAINER -> ROCKS_DB_PATH_CONTAINER -#INGESTER_SYNCHRONIZER_DUMP_PATH -> ROCKS_DUMP_PATH -#API_ROCKS_DB_PATH_CONTAINER -> ROCKS_DB_PATH_CONTAINER -#INGESTER_FILE_STORAGE_PATH -> FILE_STORAGE_PATH -#INGESTER_FILE_STORAGE_PATH_CONTAINER -> FILE_STORAGE_PATH_CONTAINER -# INGESTER_PROFILING_FILE_PATH -> PROFILING_FILE_PATH -# INGESTER_PROFILING_FILE_PATH_CONTAINER -> PROFILING_FILE_PATH_CONTAINER -# INGESTER_MIGRATION_STORAGE_PATH -> ROCKS_MIGRATION_STORAGE_PATH -# -# INGESTER_ROCKS_BACKUP_ARCHIVES_DIR -> ROCKS_BACKUP_ARCHIVES_DIR -# INGESTER_ROCKS_BACKUP_DIR -> ROCKS_BACKUP_DIR -# -#SYNCHRONIZER_DUMP_PATH -> ROCKS_DUMP_PATH -#INGESTER_SLOTS_DB_PATH -> ROCKS_SLOTS_DB_PATH -#INGESTER_SECONDARY_SLOTS_DB_PATH -> ROCKS_SECONDARY_SLOTS_DB_PATH - -#API_DATABASE_CONFIG -> PG_MAX_DB_CONNECTIONS and PG_DATABASE_URL - -#SYNCHRONIZER_DUMP_PATH -> ROCKS_DUMP_PATH -#SYNCHRONIZER_ROCKS_DB_SECONDARY_PATH_CONTAINER -> ROCKS_DB_SECONDARY_PATH_CONTAINER -#SYNCHRONIZER_DUMP_SYNCHRONIZER_BATCH_SIZE -> DUMP_SYNCHRONIZER_BATCH_SIZE -#SYNCHRONIZER_DUMP_SYNC_THRESHOLD -> DUMP_SYNC_THRESHOLD - -#API_ARCHIVES_DIR -> rocks_archives_dir -#API_ROCKS_DB_PATH_CONTAINER -> ROCKS_DB_PATH_CONTAINER -#API_ROCKS_DB_SECONDARY_PATH_CONTAINER -> ROCKS_DB_SECONDARY_PATH_CONTAINER -#API_FILE_STORAGE_PATH_CONTAINER -> FILE_STORAGE_PATH_CONTAINER -#API_JSON_MIDDLEWARE_CONFIG -> JSON_MIDDLEWARE_CONFIG -#API_CONSISTENCE_SYNCHRONIZATION_API_THRESHOLD - > CONSISTENCE_SYNCHRONIZATION_API_THRESHOLD -#API_CONSISTENCE_BACKFILLING_SLOTS_THRESHOLD -> CONSISTENCE_BACKFILLING_SLOTS_THRESHOLD -# -#API_SKIP_CHECK_TREE_GAPS -> SKIP_CHECK_TREE_GAPS -# INGESTER_REDIS_MESSENGER_CONFIG -> REDIS_CONNECTION_CONFIG and note the differnce (less complex type) -# INGESTER_BACKFILLER_SOURCE_MODE -> BACKFILLER_SOURCE_MODE -# INGESTER_BIG_TABLE_CONFIG -> BIG_TABLE_CONFIG -# both API_PEER_GRPC_PORT and INGESTER_PEER_GRPC_PORT were replaced with a single PEER_GRPC_PORT - only one will actually work -# API_PEER_GRPC_MAX_GAP_SLOTS and INGESTER_PEER_GRPC_MAX_GAP_SLOTS -> PEER_GRPC_MAX_GAP_SLOTS (optional) -# INGESTER_ROCKS_SYNC_INTERVAL_SECONDS and API_ROCKS_SYNC_INTERVAL_SECONDS -> ROCKS_SYNC_INTERVAL_SECONDS (optional) -# INGESTER_GAPFILLER_PEER_ADDR -> GAPFILLER_PEER_ADDR (optional, only if is_run_gapfiller is set) -# INGESTER_ACCOUNTS_BUFFER_SIZE -> ACCOUNT_PROCESSOR_BUFFER_SIZE (has default value, may be skipped) -# INGESTER_ACCOUNTS_PARSING_WORKERS -> REDIS_ACCOUNTS_PARSING_WORKERS -# INGESTER_ROCKS_FLUSH_BEFORE_BACKUP -> ROCKS_FLUSH_BEFORE_BACKUP -# INGESTER_ROCKS_INTERVAL_IN_SECONDS -> ROCKS_INTERVAL_IN_SECONDS -# INGESTER_TRANSACTIONS_PARSING_WORKERS -> REDIS_TRANSACTIONS_PARSING_WORKERS -# INGESTER_SHOULD_REINGEST -> SHOULD_REINGEST -# INGESTER_RUN_SEQUENCE_CONSISTENT_CHECKER -> RUN_SEQUENCE_CONSISTENT_CHECKER -# INGESTER_RUN_PROFILING -> IS_RUN_PROFILING -# INGESTER_RUN_BUBBLEGUM_BACKFILLER -> IS_RUN_BUBBLEGUM_BACKFILLER -# Removed: -# INGESTER_BACKFILL_RPC_ADDRESS, INGESTER_BACKFILLER_MODE, INGESTER_MESSAGE_SOURCE (redis is used as an only option as of now), INGESTER_DISABLE_SYNCHRONIZER (synchronizer is no 
longer part of the ingester), INGESTER_CHUNK_SIZE, INGESTER_PERMITTED_TASKS, INGESTER_TCP_CONFIG, INGESTER_WORKERS_COUNT, INGESTER_WAIT_PERIOD_SEC, INGESTER_SNAPSHOT_PARSING_WORKERS, INGESTER_SNAPSHOT_PARSING_BATCH_SIZE, INGESTER_SLOT_UNTIL, INGESTER_SLOT_START_FROM, INGESTER_RUN_FORK_CLEANER, INGESTER_RUN_DUMP_SYNCHRONIZE_ON_START diff --git a/.env.example_old b/.env.example_old new file mode 100644 index 00000000..69a93019 --- /dev/null +++ b/.env.example_old @@ -0,0 +1,129 @@ +# Required by Postgre container +POSTGRE_DB_PATH="postgre/db/path" + +RUST_BACKTRACE=1 +# Ingester instance config +INGESTER_LOG_LEVEL=info + +INGESTER_DATABASE_CONFIG='{max_postgres_connections=10, url="postgres://user:pass@0.0.0.0:5432/database"}' +INGESTER_TCP_CONFIG='{receiver_addr="localhost:2000", receiver_reconnect_interval=5, snapshot_receiver_addr="localhost:5000"}' +INGESTER_REDIS_MESSENGER_CONFIG='{messenger_type="Redis", connection_config={redis_connection_str="redis://:pass@localhost:6379"}}' +INGESTER_MESSAGE_SOURCE=Redis + +INGESTER_ACCOUNTS_BUFFER_SIZE=250 +INGESTER_ACCOUNTS_PARSING_WORKERS=20 +INGESTER_TRANSACTIONS_PARSING_WORKERS=20 + +INGESTER_SNAPSHOT_PARSING_WORKERS=1 +INGESTER_SNAPSHOT_PARSING_BATCH_SIZE=250 + +INGESTER_GAPFILLER_PEER_ADDR="0.0.0.0" +INGESTER_METRICS_PORT=9091 +INGESTER_SERVER_PORT=9092 +INGESTER_PEER_GRPC_PORT=9099 + +INGESTER_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data" +INGESTER_ROCKS_DB_PATH="path/to/rocks/on/disk" + +INGESTER_ARCHIVES_DIR="path/to/rocks/backup/archives" +INGESTER_ROCKS_BACKUP_ARCHIVES_DIR="path/to/rocks/backup/archives" +INGESTER_ROCKS_BACKUP_DIR="path/to/rocks/backup/" + +INGESTER_BACKFILL_RPC_ADDRESS='https://rpc:port' +INGESTER_RPC_HOST='https://rpc:port' + +INGESTER_BACKFILLER_SOURCE_MODE=RPC #RPC or Bigtable +INGESTER_BIG_TABLE_CONFIG='{creds="/usr/src/app/creds.json", timeout=1000}' + +INGESTER_RUN_SEQUENCE_CONSISTENT_CHECKER=false # experimental, enable only for testing purposes +# Optional, required only if it needs to run fork cleaner, default is false. Unstable as it removes forked items, but also removes some valid leafs. Recommended to use only! for testing purposes. 
+INGESTER_RUN_FORK_CLEANER=false +INGESTER_RUN_BUBBLEGUM_BACKFILLER=true + +INGESTER_BACKFILLER_MODE=PersistAndIngest # The only available option, the variable will be removed +INGESTER_SLOT_UNTIL=0 +INGESTER_SLOT_START_FROM=0 +INGESTER_WORKERS_COUNT=100 +INGESTER_CHUNK_SIZE=20 +INGESTER_PERMITTED_TASKS=1 +INGESTER_WAIT_PERIOD_SEC=30 +INGESTER_SHOULD_REINGEST=false + +INGESTER_PEER_GRPC_MAX_GAP_SLOTS=1000000 + +INGESTER_RUN_PROFILING=false +INGESTER_PROFILING_FILE_PATH_CONTAINER="/usr/src/profiling" +INGESTER_PROFILING_FILE_PATH="/path/to/profiling" + +INGESTER_FILE_STORAGE_PATH_CONTAINER="/usr/src/app/file_storage" +INGESTER_FILE_STORAGE_PATH="path/to/file/storage" +INGESTER_MIGRATION_STORAGE_PATH=/path/to/migration_storage # requires explanation + +INGESTER_ROCKS_FLUSH_BEFORE_BACKUP=false +INGESTER_ROCKS_INTERVAL_IN_SECONDS=3600 +INGESTER_ROCKS_SYNC_INTERVAL_SECONDS=2 + +INGESTER_SYNCHRONIZER_DUMP_PATH="/path/to/dump" +INGESTER_DISABLE_SYNCHRONIZER=true +INGESTER_SKIP_CHECK_TREE_GAPS=true +# path to the slots data, required for the backfiller to work +INGESTER_SLOTS_DB_PATH=/path/to/slots-data +INGESTER_SECONDARY_SLOTS_DB_PATH=/path/to/secondary/ingester-slots # should be removed + +# a common log level for all instances, will be overridden by specific log levels, requires refactoring +RUST_LOG=info +# API instance config +API_LOG_LEVEL=info + +API_DATABASE_CONFIG='{max_postgres_connections=250, url="postgres://user:pass@0.0.0.0:5432/database"}' + +API_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data" +API_ROCKS_DB_SECONDARY_PATH_CONTAINER="path/to/rocks/secondary/db" +API_ARCHIVES_DIR="path/to/rocks/backup/archives" + +API_PEER_GRPC_PORT=8991 +API_METRICS_PORT=8985 +API_SERVER_PORT=8990 + +API_RPC_HOST='https://rpc:port' + +API_ROCKS_SYNC_INTERVAL_SECONDS=2 +API_FILE_STORAGE_PATH_CONTAINER="/usr/src/app/file_storage" +API_FILE_STORAGE_PATH="path/to/file/storage" + +API_PEER_GRPC_MAX_GAP_SLOTS=1000000 +API_JSON_MIDDLEWARE_CONFIG='{is_enabled=true, max_urls_to_parse=10}' + +API_CONSISTENCE_SYNCHRONIZATION_API_THRESHOLD=1000000 +API_CONSISTENCE_BACKFILLING_SLOTS_THRESHOLD=500 + +# if set to true API will not check if tree where user requests assets from has any gaps +API_SKIP_CHECK_TREE_GAPS=true + +# Synchronizer instance config +SYNCHRONIZER_LOG_LEVEL=info + +SYNCHRONIZER_DATABASE_CONFIG='{max_postgres_connections=100, url="postgres://user:pass@0.0.0.0:5432/database"}' +SYNCHRONIZER_ROCKS_DB_PATH_CONTAINER="/usr/src/rocksdb-data" +SYNCHRONIZER_ROCKS_DB_SECONDARY_PATH_CONTAINER="path/to/rocks/secondary/db" # should be removed + +SYNCHRONIZER_METRICS_PORT=6091 + +SYNCHRONIZER_DUMP_PATH="/path/to/migration_data" + +SYNCHRONIZER_DUMP_SYNCHRONIZER_BATCH_SIZE=10000 +# threshold on the number of updates not being synchronized for the synchronizer to dump-load on start +# 150M - that's a rough threshold after which the synchronizer will likely complete a full dymp-load cycle faster then doing an incremental sync +SYNCHRONIZER_DUMP_SYNC_THRESHOLD=150000000 + +SYNCHRONIZER_PARALLEL_TASKS=30 + +# Profiling config +# Optional, required only if it needs to run memory profiling +MALLOC_CONF="prof:true,prof_leak:true,prof_final:true,prof_active:true,prof_prefix:/usr/src/app/heaps/,lg_prof_interval:32,lg_prof_sample:19" + +# Integrity verification +INTEGRITY_VERIFICATION_TEST_FILE_PATH="./test_keys/test_keys.txt" +INTEGRITY_VERIFICATION_TEST_FILE_PATH_CONTAINER="/test_keys/test_keys.txt" +INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH="./slots_collect" 
+INTEGRITY_VERIFICATION_SLOTS_COLLECT_PATH_CONTAINER="/slots_collect" \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 3019d3c7..95d3701a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "Inflector" @@ -701,9 +701,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anstyle-parse" @@ -1956,9 +1956,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.4" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" +checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" dependencies = [ "clap_builder", "clap_derive", @@ -1966,21 +1966,21 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.2" +version = "4.5.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" dependencies = [ "anstream", "anstyle", - "clap_lex 0.7.0", + "clap_lex 0.7.4", "strsim 0.11.1", ] [[package]] name = "clap_derive" -version = "4.5.4" +version = "4.5.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" +checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" dependencies = [ "heck 0.5.0", "proc-macro2", @@ -1999,9 +1999,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" +checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "clipboard-win" @@ -2082,7 +2082,7 @@ name = "consistency_check" version = "0.1.0" dependencies = [ "bincode", - "clap 4.5.4", + "clap 4.5.26", "csv", "entities", "indicatif", @@ -2241,7 +2241,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.4", + "clap 4.5.26", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -3883,7 +3883,7 @@ dependencies = [ "anchor-lang 0.30.1", "assert-json-diff", "async-trait", - "clap 4.5.4", + "clap 4.5.26", "dotenvy", "entities", "env_logger 0.10.2", @@ -4774,7 +4774,7 @@ dependencies = [ "bs58 0.4.0", "bubblegum-batch-sdk", "chrono", - "clap 4.5.4", + "clap 4.5.26", "coingecko", "criterion", "entities", diff --git a/Cargo.toml b/Cargo.toml index 8d305e43..7c7eb397 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,7 +34,6 @@ flatbuffers = "23.1.21" plerkle_serialization = "1.9.0" plerkle_messenger = { version = "1.9.0", features = ['redis'] } borsh = "~0.10.3" -borsh-derive = "~0.10.3" # Database infrastructure @@ -84,7 +83,7 @@ num-traits = "0.2.17" # Configuration, env-vars and cli parsing figment = { version = "0.10.6", features = ["env", "toml", "yaml"] } -clap = { version = "4.2.2", features = ["derive", "cargo"] } +clap = { version = "4.5.26", features = ["derive", "cargo"] } dotenvy = "0.15.7" indicatif = "0.17" @@ -104,7 +103,6 @@ futures-util = 
"0.3.29" async-recursion = "1.1.1" async-channel = "2.3.1" stretto = { version = "0.8.4", features = ["async"] } -triomphe = "=0.1.9" # Lazy once_cell = "1.19.0" lazy_static = "1.4.0" diff --git a/nft_ingester/src/bin/api/main.rs b/nft_ingester/src/bin/api/main.rs index 368138ee..1ecffcab 100644 --- a/nft_ingester/src/bin/api/main.rs +++ b/nft_ingester/src/bin/api/main.rs @@ -30,7 +30,7 @@ pub async fn main() -> Result<(), IngesterError> { info!("Starting API server..."); - let guard = if args.is_run_profiling { + let guard = if args.run_profiling { Some(pprof::ProfilerGuardBuilder::default().frequency(100).build().unwrap()) } else { None diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index a7fbab24..a2870025 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -84,21 +84,26 @@ pub async fn main() -> Result<(), IngesterError> { info!("Starting Ingester..."); info!("___________________________________",); - info!("API: {}", args.is_run_api); - if args.is_run_api { + info!("API: {}", args.run_api); + if args.run_api { info!("API port: localhost:{}", args.server_port); } - info!("Back Filler: {}", args.is_run_backfiller); - info!("Bubblegum BackFiller: {}", args.is_run_bubblegum_backfiller); - info!("Gap Filler: {}", args.is_run_gapfiller); - info!("Run Profiling: {}", args.is_run_profiling); + info!("Back Filler: {}", args.run_backfiller); + info!("Bubblegum BackFiller: {}", args.run_bubblegum_backfiller); + info!("Gap Filler: {}", args.run_gapfiller); + info!("Run Profiling: {}", args.run_profiling); info!("Sequence Consistent Checker: {}", args.run_sequence_consistent_checker); + info!("Sequence Consistent Checker: {}", args.run_sequence_consistent_checker); + info!("Account redis parsing workers: {}", args.redis_accounts_parsing_workers); + info!("Account processor buffer size: {}", args.account_processor_buffer_size); + info!("Tx redis parsing workers: {}", args.redis_transactions_parsing_workers); + info!("Tx processor buffer size: {}", args.tx_processor_buffer_size); info!("___________________________________",); let mut metrics_state = MetricState::new(); metrics_state.register_metrics(); - let guard = args.is_run_profiling.then(|| { + let guard = args.run_profiling.then(|| { ProfilerGuardBuilder::default() .frequency(100) .build() @@ -126,7 +131,7 @@ pub async fn main() -> Result<(), IngesterError> { let primary_rocks_storage = Arc::new( init_primary_storage( &args.rocks_db_path_container, - args.rocks_enable_migration, + args.enable_rocks_migration, &args.rocks_migration_storage_path, &metrics_state, mutexed_tasks.clone(), @@ -164,8 +169,9 @@ pub async fn main() -> Result<(), IngesterError> { let ack_channel = create_ack_channel(cloned_rx, message_config.clone(), mutexed_tasks.clone()).await; - for _ in 0..args.redis_accounts_parsing_workers { + for index in 0..args.redis_accounts_parsing_workers { let account_consumer_worker_name = Uuid::new_v4().to_string(); + info!("New Redis account worker {}: {}", index, account_consumer_worker_name); let personal_message_config = MessengerConfig { messenger_type: MessengerType::Redis, @@ -204,12 +210,15 @@ pub async fn main() -> Result<(), IngesterError> { .await; } - for _ in 0..args.redis_transactions_parsing_workers { + for index in 0..args.redis_transactions_parsing_workers { + let tx_consumer_worker_name = Uuid::new_v4().to_string(); + info!("New Redis tx worker {} : {}", index, tx_consumer_worker_name); + let personal_message_config = MessengerConfig 
{ messenger_type: MessengerType::Redis, connection_config: { let mut config = args.redis_connection_config.clone(); - config.insert("consumer_id".to_string(), Uuid::new_v4().to_string().into()); + config.insert("consumer_id".to_string(), tx_consumer_worker_name.into()); config .entry("batch_size".to_string()) .or_insert_with(|| args.tx_processor_buffer_size.into()); @@ -267,7 +276,7 @@ pub async fn main() -> Result<(), IngesterError> { .await, ); - if args.is_run_gapfiller { + if args.run_gapfiller { info!("Start gapfiller..."); let gaped_data_client = Client::connect(&args.gapfiller_peer_addr.expect("gapfiller peer address is expected")) @@ -319,7 +328,7 @@ pub async fn main() -> Result<(), IngesterError> { let cloned_rx = shutdown_rx.resubscribe(); let file_storage_path = args.file_storage_path_container.clone(); - if args.is_run_api { + if args.run_api { info!("Starting API (Ingester)..."); let middleware_json_downloader = args .json_middleware_config @@ -382,7 +391,7 @@ pub async fn main() -> Result<(), IngesterError> { let shutdown_token = CancellationToken::new(); // Backfiller - if args.is_run_backfiller { + if args.run_backfiller { info!("Start backfiller..."); let backfill_bubblegum_updates_processor = Arc::new(BubblegumTxProcessor::new( @@ -417,7 +426,7 @@ pub async fn main() -> Result<(), IngesterError> { .await, ); - if args.is_run_bubblegum_backfiller { + if args.run_bubblegum_backfiller { info!("Runing Bubblegum backfiller (ingester)..."); if args.should_reingest { diff --git a/nft_ingester/src/bin/ingester/readme.md b/nft_ingester/src/bin/ingester/readme.md index dd7a27ef..11e288c0 100644 --- a/nft_ingester/src/bin/ingester/readme.md +++ b/nft_ingester/src/bin/ingester/readme.md @@ -27,7 +27,11 @@ Run indexer with minimum functionality. 
(without API/Back Filler/Bubblegum BackF ./target/debug/ingester \ --pg-database-url postgres://solana:solana@localhost:5432/aura_db \ --rpc-host https://mainnet-aura.metaplex.com/{personal_rpc_key} \ - --redis-connection-config '{"redis_connection_str":"redis://127.0.0.1:6379/0"}' + --redis-connection-config '{"redis_connection_str":"redis://127.0.0.1:6379/0"}' \ + --disable-api \ + --disable-backfiller \ + --disable-rocks-migration + ``` ### Main components that can be run with ingester diff --git a/nft_ingester/src/bin/synchronizer/main.rs b/nft_ingester/src/bin/synchronizer/main.rs index e2452e4f..97af67ab 100644 --- a/nft_ingester/src/bin/synchronizer/main.rs +++ b/nft_ingester/src/bin/synchronizer/main.rs @@ -30,7 +30,7 @@ pub async fn main() -> Result<(), IngesterError> { tracing::info!("Starting Synchronizer server..."); - let guard = if args.is_run_profiling { + let guard = if args.run_profiling { Some(pprof::ProfilerGuardBuilder::default().frequency(100).build().unwrap()) } else { None diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index b4c5a301..5fa2cb32 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -1,4 +1,4 @@ -use clap::{Parser, ValueEnum}; +use clap::{ArgAction, Parser, ValueEnum}; use figment::value::Dict; use serde::Deserialize; use solana_sdk::commitment_config::CommitmentLevel; @@ -6,7 +6,7 @@ use tracing_subscriber::fmt; use crate::error::IngesterError; -#[derive(Parser, Debug)] +#[derive(clap::Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct IngesterClapArgs { #[clap( @@ -68,43 +68,44 @@ pub struct IngesterClapArgs { pub parallel_json_downloaders: i32, #[clap( - long("run_api"), + long("disable-api"), default_value_t = true, - env = "IS_RUN_API", - help = "Run API (default: false)" + action = ArgAction::SetFalse, + env = "RUN_API", + help = "Disable API (default: true)" )] - pub is_run_api: bool, + pub run_api: bool, #[clap( - long("run_gapfiller"), + long("run-gapfiller"), default_value_t = false, - env = "IS_RUN_GAPFILLER", - help = "Start gapfiller", + env = "RUN_GAPFILLER", + help = "Start gapfiller. (default: false)", requires = "gapfiller_peer_addr" )] - pub is_run_gapfiller: bool, + pub run_gapfiller: bool, #[clap(long, env, default_value = "0.0.0.0", help = "Gapfiller peer address")] pub gapfiller_peer_addr: Option, #[clap( - long("run_profiling"), + long("run-profiling"), default_value_t = false, - env = "IS_RUN_PROFILING", + env = "INGESTER_RUN_PROFILING", help = "Start profiling (default: false)" )] - pub is_run_profiling: bool, + pub run_profiling: bool, #[clap(long, env, value_parser = parse_json_to_json_middleware_config, help = "Example: {'is_enabled':true, 'max_urls_to_parse':10} ",)] pub json_middleware_config: Option, // Group: Rocks DB Configuration #[clap( - long("restore_rocks_db"), + long("restore-rocks-db"), default_value_t = false, - env = "IS_RESTORE_ROCKS_DB", + env = "RESTORE_ROCKS_DB", help = "Try restore rocks (default: false)", - requires = "rocks_backup_url", //todo: if true + requires = "rocks_backup_url", requires = "rocks_backup_archives_dir" )] pub is_restore_rocks_db: bool, @@ -112,17 +113,20 @@ pub struct IngesterClapArgs { pub rocks_backup_url: Option, #[clap(long, env, help = "Rocks backup archives dir")] pub rocks_backup_archives_dir: Option, + + // requires = "rocks_migration_storage_path" is not working because default value is true. 
(clap issue) #[clap( - long, - env = "IS_ENABLE_ROCKS_MIGRATION", + long("disable-rocks-migration"), + env = "ENABLE_ROCKS_MIGRATION", + action = ArgAction::SetFalse, default_value_t = true, - help = "Enable migration for rocksdb (default: true)" + help = "Disable migration for rocksdb (default: true) requires: rocks_migration_storage_path" )] - pub rocks_enable_migration: bool, + pub enable_rocks_migration: bool, #[clap(long, env, help = "Migration storage path dir")] pub rocks_migration_storage_path: Option, - #[clap(long, env, default_value_t = false, help = "Start consistent checker (default: false)")] + #[clap(long, env, help = "Start consistent checker (default: false)")] pub run_sequence_consistent_checker: bool, #[clap( @@ -177,14 +181,15 @@ pub struct IngesterClapArgs { #[clap(long, env, help = "#api Storage service base url")] pub storage_service_base_url: Option, + // requires = "rocks_slots_db_path" is not working because default value is true. #[clap( - long, - env = "IS_RUN_BACKFILLER", + long("disable-backfiller"), + action = ArgAction::SetFalse, + env = "RUN_BACKFILLER", default_value_t = true, - help = "Start backfiller (default: true)", - requires = "rocks_slots_db_path" + help = "Disable backfiller. (default: true) requires: rocks_slots_db_path", )] - pub is_run_backfiller: bool, + pub run_backfiller: bool, #[clap( long, env, @@ -208,12 +213,13 @@ pub struct IngesterClapArgs { pub big_table_config: Option, #[clap( - long, - env = "IS_RUN_BUBBLEGUM_BACKFILLER", + long("disable-bubblegum-backfiller"), + action = ArgAction::SetFalse, + env = "RUN_BUBBLEGUM_BACKFILLER", default_value_t = true, - help = "#bubbl Start bubblegum backfiller (default: true)" + help = "#bubbl Disable bubblegum backfiller (default: true)" )] - pub is_run_bubblegum_backfiller: bool, + pub run_bubblegum_backfiller: bool, #[clap( long, env = "SHOULD_REINGEST", @@ -279,12 +285,12 @@ pub struct SynchronizerClapArgs { pub rocks_dump_path: String, #[clap( - long("run_profiling"), - env = "IS_RUN_PROFILING", + long("run-profiling"), + env = "SYNCHRONIZER_RUN_PROFILING", default_value_t = false, help = "Start profiling (default: false)" )] - pub is_run_profiling: bool, + pub run_profiling: bool, #[clap(long, env, default_value = "/usr/src/app/heaps", help = "Heap path")] pub heap_path: String, @@ -411,12 +417,12 @@ pub struct ApiClapArgs { )] pub skip_check_tree_gaps: bool, #[clap( - env = "IS_RUN_PROFILING", - long("run_profiling"), + env = "API_RUN_PROFILING", + long("run-profiling"), default_value_t = false, - help = "Start profiling (default: false)" + help = "Start profiling. 
(default: false)" )] - pub is_run_profiling: bool, + pub run_profiling: bool, #[clap( long, @@ -575,18 +581,18 @@ mod tests { assert_eq!(args.pg_max_db_connections, 100); assert_eq!(args.sequence_consistent_checker_wait_period_sec, 60); assert_eq!(args.parallel_json_downloaders, 100); - assert_eq!(args.is_run_api, true); - assert_eq!(args.is_run_gapfiller, false); - assert_eq!(args.is_run_profiling, false); + assert_eq!(args.run_api, true); + assert_eq!(args.run_gapfiller, false); + assert_eq!(args.run_profiling, false); assert_eq!(args.is_restore_rocks_db, false); - assert_eq!(args.is_run_bubblegum_backfiller, true); + assert_eq!(args.run_bubblegum_backfiller, true); assert_eq!(args.run_sequence_consistent_checker, false); assert_eq!(args.should_reingest, false); assert_eq!(args.check_proofs, false); assert_eq!(args.check_proofs_commitment, CommitmentLevel::Finalized); assert_eq!(args.archives_dir, "/rocksdb/_rocks_backup_archives"); assert_eq!(args.skip_check_tree_gaps, false); - assert_eq!(args.is_run_backfiller, true); + assert_eq!(args.run_backfiller, true); assert_eq!(args.backfiller_source_mode, BackfillerSourceMode::RPC); assert_eq!(args.heap_path, "/usr/src/app/heaps"); assert_eq!(args.log_level, "info"); @@ -604,7 +610,7 @@ mod tests { assert_eq!(args.pg_max_db_connections, 100); assert_eq!(args.rocks_db_path_container, "./my_rocksdb"); assert_eq!(args.rocks_db_secondary_path, "./my_rocksdb_secondary"); - assert_eq!(args.is_run_profiling, false); + assert_eq!(args.run_profiling, false); assert_eq!(args.heap_path, "/usr/src/app/heaps"); assert_eq!(args.dump_synchronizer_batch_size, 200000); assert_eq!(args.dump_sync_threshold, 150000000); @@ -628,7 +634,7 @@ mod tests { assert_eq!(args.rocks_sync_interval_seconds, 2); assert_eq!(args.heap_path, "/usr/src/app/heaps"); assert_eq!(args.skip_check_tree_gaps, false); - assert_eq!(args.is_run_profiling, false); + assert_eq!(args.run_profiling, false); assert_eq!(args.check_proofs, false); assert_eq!(args.check_proofs_probability, 0.1); assert_eq!(args.check_proofs_commitment, CommitmentLevel::Finalized); From 4ca6f659ef6cc4ae6efb8e8c6bd7468ecb00aa20 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Mon, 20 Jan 2025 14:23:51 +0100 Subject: [PATCH 03/33] MTG-1223 fix log parameter --- nft_ingester/src/bin/ingester/main.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index a2870025..bf9bf4c4 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -93,7 +93,6 @@ pub async fn main() -> Result<(), IngesterError> { info!("Gap Filler: {}", args.run_gapfiller); info!("Run Profiling: {}", args.run_profiling); info!("Sequence Consistent Checker: {}", args.run_sequence_consistent_checker); - info!("Sequence Consistent Checker: {}", args.run_sequence_consistent_checker); info!("Account redis parsing workers: {}", args.redis_accounts_parsing_workers); info!("Account processor buffer size: {}", args.account_processor_buffer_size); info!("Tx redis parsing workers: {}", args.redis_transactions_parsing_workers); From daac082cd0b16269baba2442e534cc12a3fdff00 Mon Sep 17 00:00:00 2001 From: Vadim <31490938+n00m4d@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:50:37 +0100 Subject: [PATCH 04/33] test: add new getAsset test for regular nft collection (#377) --- ...yuwGzav7jTW9YaBGj2Qtp2q24zPUR3rD5caojXaby4 | Bin 0 -> 824 bytes ...RSMZ6XHyiy45kN84F2YRhpX5SVwtyWN1YPzpyMZNBx | Bin 0 -> 312 
bytes ...S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w | Bin 0 -> 224 bytes ...S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w | 1 + integration_tests/src/regular_nft_tests.rs | 28 +++++++ ...lar_nft_tests__regular_nft_collection.snap | 76 ++++++++++++++++++ 6 files changed, 105 insertions(+) create mode 100644 integration_tests/src/data/accounts/regular_nft_collection/8KyuwGzav7jTW9YaBGj2Qtp2q24zPUR3rD5caojXaby4 create mode 100644 integration_tests/src/data/accounts/regular_nft_collection/BvRSMZ6XHyiy45kN84F2YRhpX5SVwtyWN1YPzpyMZNBx create mode 100644 integration_tests/src/data/accounts/regular_nft_collection/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w create mode 100644 integration_tests/src/data/largest_token_account_ids/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w create mode 100644 integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap diff --git a/integration_tests/src/data/accounts/regular_nft_collection/8KyuwGzav7jTW9YaBGj2Qtp2q24zPUR3rD5caojXaby4 b/integration_tests/src/data/accounts/regular_nft_collection/8KyuwGzav7jTW9YaBGj2Qtp2q24zPUR3rD5caojXaby4 new file mode 100644 index 0000000000000000000000000000000000000000..e626eded36e962542adef2349f4c431edbfcd2dc GIT binary patch literal 824 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQ%(*w$Til+rEkpdup;a3Wy<}K;K_k+Y z;j7NVHOG@!qz`i!q;7nCvBuS6Zcu$T%i*&TJ93Jp7X01nx$WW}<{z4F8Xz;bgaSzh zhW}8&2<9;`=uBSHMt}+KTogNSg*8Lw>-7DMAt|!F*mU) zKQFPoST8w0S3e#}Sypj=9@w}6CB85+f&!CJATp*fFf#sSWMN=nWcZ*4aWNwUBWIgZ p>*GSM30Gz3xij6HHTUYvzbDo-Ub?jA;={WqRQ??qP_GRx%KE|Y!Qhxf{aL=&aB{#!b^j4SQ- zuE}AFJ9+Xs_j|Ux-yU91T{!vX>4$FTURSP>+0yzpKH7YuRnOPfy8jF-HGpP(kd+6L z3=IFFfDz1NV3;|ztz!z5vlJ-yXS1~b&u1CcuT4+a#IBaSted{9FFeWb^kM$ZYh+?L y;zX@ya(u3_epIleKxY1V6{EjV+op2J=NtH#8^5k)ch7{G3pE}u#e`QDza#+QWlf|2 literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/regular_nft_collection/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w b/integration_tests/src/data/accounts/regular_nft_collection/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w new file mode 100644 index 0000000000000000000000000000000000000000..9415c60ddd69970ea9229eb2876719e525511a05 GIT binary patch literal 224 zcmY#jfB*@G3er^HYhqVRUe--t))$`S zclt2@<~1@g9C2)SzdgL3x^VK%(+}Owy{=p%v!(TIe6;yQtDdi|b^jSwg3L_t6$6qC z4F92k5zJ#?_&BAlV+xQH1muIngm$RC+pe}$x@zj4&+WE`-S;_+>p6CFJZ<1_HQ31= OQ4Q0_z(|a01_l7{Dotqs literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/largest_token_account_ids/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w b/integration_tests/src/data/largest_token_account_ids/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w new file mode 100644 index 00000000..a1541cd1 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w/J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w @@ -0,0 +1 @@ +¢DH‡kËL Ôa4"^Ú,m©>½¬l^ÉÉç ï \ No newline at end of file diff --git a/integration_tests/src/regular_nft_tests.rs b/integration_tests/src/regular_nft_tests.rs index df9bd0ad..0eca3c00 100644 --- a/integration_tests/src/regular_nft_tests.rs +++ b/integration_tests/src/regular_nft_tests.rs @@ -164,3 +164,31 @@ async fn test_reg_search_assets() { let response = setup.das_api.search_assets(request, mutexed_tasks.clone()).await.unwrap(); insta::assert_json_snapshot!(name, response); } + +#[tokio::test] +#[serial] +#[named] +async fn test_regular_nft_collection() { + let name = trim_test_name(function_name!()); + let 
setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { network: Some(Network::Mainnet), clear_db: true }, + ) + .await; + + let seeds: Vec = seed_nfts(["J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_asset(request, mutexed_tasks.clone()).await.unwrap(); + insta::assert_json_snapshot!(name.clone(), response); +} diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap new file mode 100644 index 00000000..42b83705 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap @@ -0,0 +1,76 @@ +--- +source: integration_tests/src/regular_nft_tests.rs +assertion_line: 202 +expression: response +snapshot_kind: text +--- +{ + "interface": "ProgrammableNFT", + "id": "J1S9H3QjnRtBbbuD4HjPV6RpRhwuk4zKbxsnCHuTgh9w", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "https://madlads-collection.s3.us-west-2.amazonaws.com/_collection.json", + "files": [], + "metadata": { + "name": "Mad Lads", + "symbol": "MAD", + "token_standard": "ProgrammableNonFungible" + }, + "links": {} + }, + "authorities": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.05, + "basis_points": 500, + "primary_sale_happened": false, + "locked": false + }, + "creators": [ + { + "address": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW", + "share": 100, + "verified": true + } + ], + "ownership": { + "frozen": true, + "delegated": false, + "delegate": null, + "ownership_model": "single", + "owner": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW" + }, + "supply": null, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 18446744073709551615, + "token_info": { + "supply": 1, + "decimals": 0, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "2G5CotQ6Q87yhZxKUWkwLY6Foi12Q3VFQ6KN4nTLbPSz", + "freeze_authority": "2G5CotQ6Q87yhZxKUWkwLY6Foi12Q3VFQ6KN4nTLbPSz" + } +} From 938ced4460bfddac3a9f6c55816fb6785d90aad5 Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Wed, 22 Jan 2025 20:19:46 +0000 Subject: [PATCH 05/33] fix for ownership merge --- rocks-db/src/columns/asset.rs | 63 +++++++++++++++++++++++++++++++---- 1 file changed, 57 insertions(+), 6 deletions(-) diff --git a/rocks-db/src/columns/asset.rs b/rocks-db/src/columns/asset.rs index 5763fa9a..5c8e66d1 100644 --- a/rocks-db/src/columns/asset.rs +++ b/rocks-db/src/columns/asset.rs @@ -2050,14 +2050,14 @@ pub fn merge_complete_details_fb_simple_raw<'a>( // now the merged owner holds the merged data. 
We need to check if it's marked as the current owner and if it's newer than the current owner // if it doesn't have the is current owner set to true we don't need to do anything if merged_owner.is_current_owner.map(|u| u.value()).unwrap_or(false) - && !owner_is_current_owner.map(|u| u.value()).unwrap_or(false) - || merged_owner - .is_current_owner - .map(|u| u.slot_updated()) - .unwrap_or_default() - > owner_is_current_owner + && (!owner_is_current_owner.map(|u| u.value()).unwrap_or(false) + || merged_owner + .is_current_owner .map(|u| u.slot_updated()) .unwrap_or_default() + > owner_is_current_owner + .map(|u| u.slot_updated()) + .unwrap_or_default()) { // if the merged owner is newer we set it as the owner and move the old owner into the other known owners let previous_owner = FbOwnerContainer { @@ -3813,4 +3813,55 @@ mod tests { ); } } + + #[test] + fn test_merge_with_different_pubkeys_different_slots_owner_not_changed() { + // The asset recieves some stale update with is_owner set to false. No change of ownership is expected + // The example is taken from Eclipse asset F9fyHSja6zTiXjgMPQXpZJuJ4GW97Mpc6UrSX21CBBJ2 that had issues. + let original_data_bytes = solana_sdk::bs58::decode("WJRotv6FnkVY4Nqw6dQZByzbxn6QWXPdwn6msr2w1YdCVaiqWkeNz7Ygwq6TCPrG8HnF7MknbESdH6Hqw3j7QVd4KzezvbTvrKFTyX2eW8g47kq9yJs9Aerti2B3oRfcoC1GiyigtziXYRub5UWsEW6NvUnT8U4H4ozJ4FnGHJHN8WL21EC25MQXsW8Rc3QY9VGne2AtC8CnWFYS3fQTCTxTJMNd3c2iGUc8hZbPDi7nTiDwA55GMNmXqGNV45jjMucmqMnLvBp53QYYRr6xFSURNLdyFaLWN1toU1hV3NxQPmwveBfyuvyT6ic8PKbJoD2qT5Pbqo5FZiCFJ5egqn5YRN74DCs9YYwVr6uiiuWuqxwRcpW3bE73hEVSBvgZv2KR7DiPgwrY5ChLDT4Q6FxKEtaNToY8beLTP2ma8HhSPrJ7K1of5UpjHkzQ55KYo2b228QHjdXoXTUVvmPMTtqZRmvdnA5jx9sakwdjFEDUTihJ5aqwkFc4aVPagUzPYZ3RgbjHgjLP6Sq1QqZngxjYUwi9VnWCpzzSrAPSPn3HgfWQkFshXRaz9wXv5jpA1fRnZUxY36c8Lybt8FF9UGE1VKUTtFtdFc5wFWBXJL7B1tbbCSKUp96hkHHgPVB2ceh1YdDbuFcSc5Vo54m3dAj82AD2RAEiLM2miv3QhLpiLswMw5uGJkyMqXUjN4vMatGdDGsAxRBypiSSXYwxYhPbuEqvzXic9kG3rkgF4TnQGnUMcmmgscVU6FekrFqZ66ACm4Dhb7LRNzNTCeGC3ksh48YdxYaDEcoUqssp4iTnmxPvRSx4oS4gPG38t4sQFYL8BVZdEmax28BxjhATZW7qCe7r9KMeSMLQ5rPKtUQyVQtGm3Tcb2iXeKtNjwT4uFTTqNmtNe35SRa9LUN5HuVoQ6dumJD8pSWKWK5JCqbHAh1G7q4GDfZQn3oXX3Xn4ExYpTymwPrZjynsx3aMfYnE25kr5L8wAMbXy85hdEcawWPidFJxs5Ggqy5MdtU5zDKR15yQjqmuZBwukmKYk8fCgxeKy6xhzWZC8fv6gyooRqEkcuSugXTbUG6fKPp1Xd4yDweX4XHoxeGy3VYKQbmbssCqfKAvqJwy6kEcyq4wzVBBC2q7Z1K5nRvwd96bnfVub4nPcmYQ2hinAZgnDL37cULxSvNhXgv8yaurkTumEtimvZkPeNsP5FQFy1eHv7qhtyzdB8WCmdv6G2HanWfmP4DEjPvz1n6PVqtLhZJvCuhtkcFweuV9CmQa1dPYoa6M6VjksmCpRPo3B3U4ZBUWHtZDKkzPjsu5Hq13bx35oR7qqWFLT9JGikAmr1dxpS2TBCBTb4y6W1wkK8C6mKCsndEsADSQZsxZFFkUx4vvDmbNgN69FhCjgo2JiZk38XJ9H6ijcGGjNzWzcV8tGNqMaMK65xKdCz5w3TuPKgdjo5GKwziGwNJtHf7Ti3yiZ5Q1PS8nts6kXQN15ne6LkeuX7zpBF3YQEi5f8pa4DARtBBpmJaVCvC2QF72doUepTaakP8y2jk7P7YeTWctkGBVmzVpQPJak5PMNSraKBFeyACef5A3RouEd1jp3PeFPgx3yoHXyvLidnNwUzbUmWaknBA2ntE33f81wUNC2CtazWDqciWavRr6o8cCNMtiveMV64kW3pn2mhjT8faAfJ6wSk5e97avk3FkM82Faue4732Hye69hq9GSXZ1Gs8uUD6bFyFPx4GVphvyRjTeF1PCTfkFumGzs3amCW2qoKUk4EzGQQVUqWWqRLrRFkQGVpFjxaz5zH4MRBSrCLw1dq718hBwLtYfKwrQm58MrA5Z9yT6FgxaRrU5oLRmwueJAiRNFmKfFmFBYQJ4FZhu8KSCh2HLi8aaSJ7X9TpiuYmNaaLsWo8VyX2SAA1a5AWsFxPjdfVDgq5dxjB9qqCM1QpiB3kNih8rurRVWiTJaG8cho5xhNZNtxRkXuUyMr6DCbpBRo52LmTh1sJYjPZyqV1V9SEUEzLLNugUw1PzhzmLenvUP4qRWEDtfurdpiSMGfpqGWA1UG6yroxbBiusHPqa99DzknwPsFmRHUFmZ9Hj1Rck5BpTRbXSo5RoXC4qwRPsxwWWXbwuASJGfSSozf9z7qg3k8RosBWEVpDt9JM1bWHkWg43XRM3ziLqHYNeWwRnDLmMhFUYT8cUJtwfUrxQFVaarM5Z7pb661FUDgEqrob6vAZcEh72FSfi8s1jQdqDiMwP7T8AiXsyKk81cSD84k8JPFeWTJmeHosH6KVTNuaDjzvntVtcEcSU8UozwgraR2oQ59teHBxWi2wt7HTycdxfVCpUdgsT6NRTjGbPd
FZgp2DiAGPuQyZpyLyKBfFzAmL8cVSEKUqNxdJbpXAcrwyEjeSzfjWiB4Gkogt7AemrMwMoEy8sn1CsbnpQ1z8EfGzsK4486brFQHqB8JXuyyhMe3Q8XgXANYJuh3bypQneX7ZxHmPdeASTpJXJvsXUdv7xZyyKrn9vjNEnqsaNoDHqvUgWayPPBBUpWDRDSNskE4PeRme6d3WxEaP32a678LDx8givQaQNAVEfNzAXokx6449dhhVuh1BNuV4MMinSAkU3tTLtw7Y4fhY469ve41wabxYXHrjDniWEE9a9VPxPKmJpvg5gFrrKpbjpE6cpXYJgFXLczew5uVsrVhL4uCERWCGtSyVzRvriJt8WvVeu2w4csHq8WJLX7ESJXyrTdGVvKKLB5gKo2jHHzRzR8u2S6xpH4uJe8LTQt9P7bJQFvBodggj9HocSLZ1GFJmbpYhQHfxVEfJxWpmQcAeExktb6Ba1fHsuuG2PgYnfU6ovyr6bRamdx5oM4wXWcnaAdr1he3TaGM55kxg3QDNXAujd7xSfvPAXYZK248V7xcRr8bv6bnb8oq3rCyhrgRoDj81YCKPM7Xankbzr1RGbzcibEGrYALLa65N6qVWEWL1xtvgT45L3ysRuU8vC5qnbosViAguYik3pzrE3fJUdX3qUs57dWkRysgciiW9RwFtjUtdxufwqVrFHFd8niGqAMLdpkT9BERf3RUbx7bpqxUJSDMfQo9L4C7rmfh5dffWcWsRe8hdu9zT9Gou9E68SdfHn7yyXzPGMXGjL5xd7BxfaN4kPx4Te2x443YNmvCEw4LJxr9aEiqLt2AaoSfrHJdN88HccrF5bCj6S1QBAznSQquuEvMNRA6oPXtYi9DS1UDH1rYhAMfkh3pFy2rg1MCziNiDg5hENMmq1b5WVuqHAH5F4Df71a3tzbfkGJX8RDJ7CiFkgB1PNo1SRSm1CvH7FNvbTa6b4VJ86PmtcWpLDUWEErtzG5N9ArX9f6XBHWW1DmxJ8KabZopGYTTR1X6aqJxfTVsEkTkRwjQuRBAStjBCFeXd3CLFXsknPuZ1QoFjTnczwdaQtnxstXHnChWS8HbK6oQvTJ7JRK6axZEXiKpgqEMtphnkWw7UEMF4AnhRErvMaTMCzv5u89MKYv9dSz4p7ZM4SpGRiz7rvqWTZ9kUhEBK5kF77b9kyzFxoR5L3vn48qJRuaP5jmscnGeJ4kPMfxyGucVTJu5pfqrFa5a6sAmJC96jh7HmTCWRHAWJ9Bnhn9LoMdfmZMnf1xucRjFF7Qf3MX64hNrLypg5j3HCZBCoSg3vns9HEk6pYqDQNkeTaHHuj5ZxcdXZKqfLaTdSPUkCsaV2tbQDKT5URS3UmLQt3Q3G5k9K4TB7TXRCBGNz5d2KAgFw2fVYZtL5LAbASYqsJY9wDWtegyPNcdP4ZxwsKTXPq8yREhrSQHFkdKsFJWr7RwScR44C61YHGrCw7ipUNLTA4Vwgsfq2Z92yMQkHw8Y4wUSjZe5JzkaKRaWiRojzy4uMYNmS8vr7KAEhqhimD5aCR1ebsruC3LcjKQBEu1WoxUJjX7VUWZohZbHdbBp5z7de6d75NUT6oMveYvsnBB8aEohYxUgxumJHjhV9rWAM5ZiiugLcDizR3M51mo1XmUunGYbbzcMV6rSMkiVxGDPNDEkgbzqKj4kEPNWCnibxsmqu4sZNwtjBYKqk8hPMePUfbCTgWtaf925HNK72MeWASTqmMJfYBfHtnnNRvoLMwjCDjTnAF6Nfq2LTA1R486h6KVm4XDCj8djrYdHrXq2LpRg2dz4YLAD4RiFLiEDb5xA9Q").into_vec().unwrap(); + let existing_owner = + Pubkey::from_str("6wcxtwMH4ZTDFNDyVwgWUvMnTqW1v8gyNULDzDFtFEoA").unwrap(); + + let updated_owner = + Pubkey::from_str("3GTaP1A8qdNGGMme8mcvmRZURQYeFHaWS3tM9UhTH5V9").unwrap(); + let updated_owner_pubkey = + Pubkey::from_str("DspBshRwNY1bJ9HCyWpgeNQZzv7BnAoPs4bzHZ82muPk").unwrap(); + + let slot = 42751100; + let wv = 9580929183; + + let mut builder = FlatBufferBuilder::new(); + + let owner_a_not_an_owner_data = AssetOwner { + pubkey: updated_owner_pubkey, + owner: Updated { + value: Some(updated_owner), + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(wv)), + }, + is_current_owner: Updated { + value: false, + slot_updated: slot, + update_version: Some(UpdateVersion::WriteVersion(wv)), + }, + ..Default::default() + } + .convert_to_fb(&mut builder); + builder.finish_minimal(owner_a_not_an_owner_data); + let owner_a_not_an_owner_data = builder.finished_data().to_vec(); + builder.reset(); + + let merge_result = merge_complete_details_fb_simple_raw( + &[], + Some(&original_data_bytes.as_slice()), + vec![owner_a_not_an_owner_data.as_slice()].into_iter(), //perm.into_iter().map(|d| *d), + ) + .expect("expected merge to return some value"); + let resulting_asset = fb::root_as_asset_complete_details(&merge_result).unwrap(); + + assert_eq!( + resulting_asset.owner().unwrap().owner().unwrap().value().unwrap().bytes(), + existing_owner.to_bytes() + ); + } } From 2693ff7269c64e86d7a31f60ee2180c17ca06719 Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Thu, 23 Jan 2025 10:32:17 +0000 Subject: [PATCH 06/33] removed unused comment --- rocks-db/src/columns/asset.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/rocks-db/src/columns/asset.rs b/rocks-db/src/columns/asset.rs index 5c8e66d1..e4b36266 100644 --- a/rocks-db/src/columns/asset.rs +++ b/rocks-db/src/columns/asset.rs @@ -3854,7 +3854,7 @@ mod tests { let merge_result = merge_complete_details_fb_simple_raw( &[], Some(&original_data_bytes.as_slice()), - vec![owner_a_not_an_owner_data.as_slice()].into_iter(), //perm.into_iter().map(|d| *d), + vec![owner_a_not_an_owner_data.as_slice()].into_iter(), ) .expect("expected merge to return some value"); let resulting_asset = fb::root_as_asset_complete_details(&merge_result).unwrap(); From 959680530efeec8632042d407840d37c420a5ef7 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Thu, 23 Jan 2025 12:26:44 +0100 Subject: [PATCH 07/33] MTG-1223 Improve Ingester config Clap parameters. - replace bool with Option to fix issue with required fields --- nft_ingester/src/bin/ingester/main.rs | 16 ++++----- nft_ingester/src/config.rs | 51 ++++++++++++--------------- 2 files changed, 31 insertions(+), 36 deletions(-) diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index bf9bf4c4..d8531a09 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -84,12 +84,12 @@ pub async fn main() -> Result<(), IngesterError> { info!("Starting Ingester..."); info!("___________________________________",); - info!("API: {}", args.run_api); - if args.run_api { + info!("API: {}", args.run_api.unwrap_or(false)); + if args.run_api.unwrap_or(false) { info!("API port: localhost:{}", args.server_port); } - info!("Back Filler: {}", args.run_backfiller); - info!("Bubblegum BackFiller: {}", args.run_bubblegum_backfiller); + info!("Back Filler: {}", args.run_backfiller.unwrap_or(false)); + info!("Bubblegum BackFiller: {}", args.run_bubblegum_backfiller.unwrap_or(false)); info!("Gap Filler: {}", args.run_gapfiller); info!("Run Profiling: {}", args.run_profiling); info!("Sequence Consistent Checker: {}", args.run_sequence_consistent_checker); @@ -130,7 +130,7 @@ pub async fn main() -> Result<(), IngesterError> { let primary_rocks_storage = Arc::new( init_primary_storage( &args.rocks_db_path_container, - args.enable_rocks_migration, + args.enable_rocks_migration.unwrap_or(false), &args.rocks_migration_storage_path, &metrics_state, mutexed_tasks.clone(), @@ -327,7 +327,7 @@ pub async fn main() -> Result<(), IngesterError> { let cloned_rx = shutdown_rx.resubscribe(); let file_storage_path = args.file_storage_path_container.clone(); - if args.run_api { + if args.run_api.unwrap_or(false) { info!("Starting API (Ingester)..."); let middleware_json_downloader = args .json_middleware_config @@ -390,7 +390,7 @@ pub async fn main() -> Result<(), IngesterError> { let shutdown_token = CancellationToken::new(); // Backfiller - if args.run_backfiller { + if args.run_backfiller.unwrap_or(false) { info!("Start backfiller..."); let backfill_bubblegum_updates_processor = Arc::new(BubblegumTxProcessor::new( @@ -425,7 +425,7 @@ pub async fn main() -> Result<(), IngesterError> { .await, ); - if args.run_bubblegum_backfiller { + if args.run_bubblegum_backfiller.unwrap_or(false) { info!("Running Bubblegum backfiller (ingester)..."); if args.should_reingest { diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 5fa2cb32..d40b7d85 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -1,4 +1,4 @@ -use clap::{ArgAction, Parser, ValueEnum}; +use clap::{Parser, ValueEnum}; use figment::value::Dict; use
serde::Deserialize; use solana_sdk::commitment_config::CommitmentLevel; @@ -68,13 +68,12 @@ pub struct IngesterClapArgs { pub parallel_json_downloaders: i32, #[clap( - long("disable-api"), - default_value_t = true, - action = ArgAction::SetFalse, + long("run-api"), + default_value = "true", env = "RUN_API", - help = "Disable API (default: true)" + help = "Run API (default: true)" )] - pub run_api: bool, + pub run_api: Option<bool>, #[clap( long("run-gapfiller"), @@ -89,7 +88,7 @@ pub struct IngesterClapArgs { pub gapfiller_peer_addr: Option<String>, #[clap( - long("run-profiling"), + long, default_value_t = false, env = "INGESTER_RUN_PROFILING", help = "Start profiling (default: false)" @@ -114,16 +113,14 @@ pub struct IngesterClapArgs { #[clap(long, env, help = "Rocks backup archives dir")] pub rocks_backup_archives_dir: Option<String>, - // requires = "rocks_migration_storage_path" is not working because default value is true. (clap issue) #[clap( - long("disable-rocks-migration"), + long, env = "ENABLE_ROCKS_MIGRATION", - action = ArgAction::SetFalse, - default_value_t = true, - help = "Disable migration for rocksdb (default: true) requires: rocks_migration_storage_path" + default_value = "true", + help = "Enable migration for rocksdb (default: true) requires: rocks_migration_storage_path" )] - pub enable_rocks_migration: bool, + pub enable_rocks_migration: Option<bool>, - #[clap(long, env, help = "Migration storage path dir")] + #[clap(long, env, requires_if("true", "enable_rocks_migration"), help = "Migration storage path dir")] pub rocks_migration_storage_path: Option<String>, #[clap(long, env, help = "Start consistent checker (default: false)")] @@ -181,18 +178,17 @@ pub struct IngesterClapArgs { #[clap(long, env, help = "#api Storage service base url")] pub storage_service_base_url: Option<String>, - // requires = "rocks_slots_db_path" is not working because default value is true. #[clap( - long("disable-backfiller"), - action = ArgAction::SetFalse, + long, env = "RUN_BACKFILLER", - default_value_t = true, - help = "Disable backfiller. (default: true) requires: rocks_slots_db_path", + default_value = "true", + help = "Run backfiller. 
(default: true) requires: rocks_slots_db_path", )] - pub run_backfiller: bool, + pub run_backfiller: Option<bool>, #[clap( long, env, + requires_if("true", "run_backfiller"), help = "#backfiller Path to the RocksDB instance with slots (required for the backfiller to work)" )] pub rocks_slots_db_path: Option<String>, @@ -213,13 +209,12 @@ pub struct IngesterClapArgs { pub big_table_config: Option<BigTableConfig>, #[clap( - long("disable-bubblegum-backfiller"), - action = ArgAction::SetFalse, + long, env = "RUN_BUBBLEGUM_BACKFILLER", - default_value_t = true, - help = "#bubbl Disable bubblegum backfiller (default: true)" + default_value = "true", + help = "#bubbl Run bubblegum backfiller (default: true)" )] - pub run_bubblegum_backfiller: bool, + pub run_bubblegum_backfiller: Option<bool>, #[clap( long, env = "SHOULD_REINGEST", @@ -581,18 +576,18 @@ mod tests { assert_eq!(args.pg_max_db_connections, 100); assert_eq!(args.sequence_consistent_checker_wait_period_sec, 60); assert_eq!(args.parallel_json_downloaders, 100); - assert_eq!(args.run_api, true); + assert!(args.run_api.unwrap_or(false)); assert_eq!(args.run_gapfiller, false); assert_eq!(args.run_profiling, false); assert_eq!(args.is_restore_rocks_db, false); - assert_eq!(args.run_bubblegum_backfiller, true); + assert!(args.run_bubblegum_backfiller.unwrap_or(false)); assert_eq!(args.run_sequence_consistent_checker, false); assert_eq!(args.should_reingest, false); assert_eq!(args.check_proofs, false); assert_eq!(args.check_proofs_commitment, CommitmentLevel::Finalized); assert_eq!(args.archives_dir, "/rocksdb/_rocks_backup_archives"); assert_eq!(args.skip_check_tree_gaps, false); - assert_eq!(args.run_backfiller, true); + assert!(args.run_backfiller.unwrap_or(false)); assert_eq!(args.backfiller_source_mode, BackfillerSourceMode::RPC); assert_eq!(args.heap_path, "/usr/src/app/heaps"); assert_eq!(args.log_level, "info"); From aea5c9565141b2b3b8e62f3ccb36a3c2cf6ff431 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Thu, 23 Jan 2025 12:27:38 +0100 Subject: [PATCH 08/33] MTG-1223 fmt fix --- nft_ingester/src/config.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index d40b7d85..d7e9936a 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -120,7 +120,12 @@ pub struct IngesterClapArgs { help = "Enable migration for rocksdb (default: true) requires: rocks_migration_storage_path" )] pub enable_rocks_migration: Option<bool>, - #[clap(long, env, requires_if("true", "enable_rocks_migration"), help = "Migration storage path dir")] + #[clap( + long, + env, + requires_if("true", "enable_rocks_migration"), + help = "Migration storage path dir" + )] pub rocks_migration_storage_path: Option<String>, #[clap(long, env, help = "Start consistent checker (default: false)")] @@ -182,7 +187,7 @@ pub struct IngesterClapArgs { long, env = "RUN_BACKFILLER", default_value = "true", - help = "Run backfiller. (default: true) requires: rocks_slots_db_path", + help = "Run backfiller. 
(default: true) requires: rocks_slots_db_path" )] pub run_backfiller: Option, #[clap( From e3b55168761b3aef6a2e24109d832b8821f063af Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Thu, 23 Jan 2025 12:32:45 +0100 Subject: [PATCH 09/33] MTG-1223 update readme --- nft_ingester/src/bin/ingester/readme.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nft_ingester/src/bin/ingester/readme.md b/nft_ingester/src/bin/ingester/readme.md index 11e288c0..d363c821 100644 --- a/nft_ingester/src/bin/ingester/readme.md +++ b/nft_ingester/src/bin/ingester/readme.md @@ -28,9 +28,9 @@ Run indexer with minimum functionality. (without API/Back Filler/Bubblegum BackF --pg-database-url postgres://solana:solana@localhost:5432/aura_db \ --rpc-host https://mainnet-aura.metaplex.com/{personal_rpc_key} \ --redis-connection-config '{"redis_connection_str":"redis://127.0.0.1:6379/0"}' \ - --disable-api \ - --disable-backfiller \ - --disable-rocks-migration + --run-api false \ + --run-backfiller false \ + --enable-rocks-migration false ``` From 1fbfc101c7dc961397b5de73ca4c3ef54e094c22 Mon Sep 17 00:00:00 2001 From: armyhaylenko Date: Thu, 23 Jan 2025 16:42:10 +0200 Subject: [PATCH 10/33] feat: add a separate secondary rocks backup service --- .dockerignore | 3 +- docker-compose.yaml | 22 +++++ ingester.Dockerfile | 6 +- metrics_utils/src/lib.rs | 41 +++++++++ nft_ingester/src/bin/ingester/main.rs | 13 --- nft_ingester/src/bin/rocksdb_backup/main.rs | 97 +++++++++++++++++++++ nft_ingester/src/config.rs | 42 +++++++++ nft_ingester/src/error/mod.rs | 6 +- nft_ingester/src/rocks_db.rs | 15 +--- rocks-db/src/backup_service.rs | 63 +++++++------ rocks-db/src/errors.rs | 14 +-- 11 files changed, 251 insertions(+), 71 deletions(-) create mode 100644 nft_ingester/src/bin/rocksdb_backup/main.rs diff --git a/.dockerignore b/.dockerignore index 1361c02a..49d3c3ea 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,4 +4,5 @@ target db-data node-modules ledger -.anchor \ No newline at end of file +tmp +.anchor diff --git a/docker-compose.yaml b/docker-compose.yaml index 16293df1..ca544ad5 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -161,3 +161,25 @@ services: logging: options: max-size: "100m" + + rocksdb-backup: + container_name: rocksdb-backup + restart: always + entrypoint: sh -c "if [ -z '$$MALLOC_CONF' ]; then exec ./rocksdb_backup; else exec ./profiling_rocksdb_backup; fi" + env_file: + - .env + network_mode: host + volumes: + - ${ROCKS_DB_PATH}:${ROCKS_DB_PATH_CONTAINER}:rw + - ${ROCKS_BACKUP_DIR}:${ROCKS_BACKUP_DIR}:rw + - ${ROCKS_BACKUP_ARCHIVES_DIR}:${ROCKS_BACKUP_ARCHIVES_DIR}:rw + - ${PROFILING_FILE_PATH}:${PROFILING_FILE_PATH_CONTAINER}:rw + - ${ROCKS_DB_SECONDARY_PATH}:${ROCKS_DB_SECONDARY_PATH_CONTAINER}:rw + - ./heaps:/usr/src/app/heaps:rw + build: + context: . + dockerfile: ingester.Dockerfile + stop_grace_period: 2m + logging: + options: + max-size: "100m" diff --git a/ingester.Dockerfile b/ingester.Dockerfile index 2269b423..94d91715 100644 --- a/ingester.Dockerfile +++ b/ingester.Dockerfile @@ -38,12 +38,12 @@ RUN cargo chef cook --release --recipe-path recipe.json # Building the services FROM cacher AS builder COPY . . 
-RUN cargo build --release --bin ingester --bin api --bin backfill --bin synchronizer --bin slot_persister +RUN cargo build --release --bin ingester --bin api --bin backfill --bin synchronizer --bin slot_persister --bin rocksdb_backup # Building the profiling feature services FROM cacher AS builder-with-profiling COPY . . -RUN cargo build --release --features profiling --bin ingester --bin api --bin backfill --bin synchronizer --bin slot_persister +RUN cargo build --release --features profiling --bin ingester --bin api --bin backfill --bin synchronizer --bin slot_persister --bin rocksdb_backup # Final image FROM rust:1.84-slim-bullseye AS runtime @@ -58,11 +58,13 @@ COPY --from=builder /rust/target/release/backfill ${APP}/backfill COPY --from=builder /rust/target/release/api ${APP}/api COPY --from=builder /rust/target/release/synchronizer ${APP}/synchronizer COPY --from=builder /rust/target/release/slot_persister ${APP}/slot_persister +COPY --from=builder /rust/target/release/rocksdb_backup ${APP}/rocksdb_backup COPY --from=builder-with-profiling /rust/target/release/ingester ${APP}/profiling_ingester COPY --from=builder-with-profiling /rust/target/release/backfill ${APP}/profiling_backfill COPY --from=builder-with-profiling /rust/target/release/api ${APP}/profiling_api COPY --from=builder-with-profiling /rust/target/release/synchronizer ${APP}/profiling_synchronizer COPY --from=builder-with-profiling /rust/target/release/slot_persister ${APP}/profiling_slot_persister +COPY --from=builder-with-profiling /rust/target/release/rocksdb_backup ${APP}/profiling_rocksdb_backup WORKDIR ${APP} STOPSIGNAL SIGINT diff --git a/metrics_utils/src/lib.rs b/metrics_utils/src/lib.rs index 4a177b4b..ad75488a 100644 --- a/metrics_utils/src/lib.rs +++ b/metrics_utils/src/lib.rs @@ -1267,3 +1267,44 @@ impl BatchMintPersisterMetricsConfig { ); } } + +#[derive(Debug, Clone)] +pub struct RocksDbMetricsConfig { + start_time: Gauge, + rocksdb_backup_latency: Histogram, +} + +impl RocksDbMetricsConfig { + pub fn new() -> Self { + Self { + start_time: Default::default(), + rocksdb_backup_latency: Histogram::new( + [60.0, 300.0, 600.0, 1200.0, 1800.0, 3600.0, 5400.0, 7200.0, 9000.0, 10800.0] + .into_iter(), + ), + } + } + + pub fn start_time(&self) -> i64 { + self.start_time.set(Utc::now().timestamp()) + } + + pub fn set_rocksdb_backup_latency(&self, duration: f64) { + self.rocksdb_backup_latency.observe(duration); + } + + pub fn register(&self, registry: &mut Registry) { + self.start_time(); + registry.register( + "ingester_rocksdb_backup_latency", + "Histogram of rocksdb backup duration", + self.rocksdb_backup_latency.clone(), + ); + } +} + +impl Default for RocksDbMetricsConfig { + fn default() -> Self { + Self::new() + } +} diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index bf9bf4c4..c81e6e61 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -625,17 +625,4 @@ pub async fn main() -> Result<(), IngesterError> { .await; Ok(()) - - // todo: remove backup service from here and move it to a separate process with a secondary db - verify it's possible first! 
- // start backup service - // if config.store_db_backups() { - // info!("Start store DB backup..."); - // let backup_service = BackupService::new(primary_rocks_storage.db.clone(), &backup_service::load_config()?)?; - // let cloned_metrics = metrics_state.ingester_metrics.clone(); - // let cloned_rx = shutdown_rx.resubscribe(); - // mutexed_tasks - // .lock() - // .await - // .spawn(perform_backup(backup_service, cloned_rx, cloned_metrics)); - // } } diff --git a/nft_ingester/src/bin/rocksdb_backup/main.rs b/nft_ingester/src/bin/rocksdb_backup/main.rs new file mode 100644 index 00000000..87d99c01 --- /dev/null +++ b/nft_ingester/src/bin/rocksdb_backup/main.rs @@ -0,0 +1,97 @@ +use std::sync::Arc; + +use clap::Parser; +use metrics_utils::{utils::start_metrics, RocksDbMetricsConfig}; +use nft_ingester::{ + config::{init_logger, RocksDbBackupServiceClapArgs}, + init::graceful_stop, +}; +use prometheus_client::registry::Registry; +use rocks_db::{ + backup_service::{RocksDbBackupService, RocksDbBackupServiceConfig}, + errors::RocksDbBackupServiceError, + migrator::MigrationState, + Storage, +}; +use tokio::{ + sync::{broadcast, Mutex}, + task::JoinSet, +}; +use tokio_util::sync::CancellationToken; +use tracing::{debug, info}; + +#[cfg(feature = "profiling")] +#[global_allocator] +static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc; + +#[tokio::main(flavor = "multi_thread")] +async fn main() -> Result<(), RocksDbBackupServiceError> { + let args = RocksDbBackupServiceClapArgs::parse(); + init_logger(&args.log_level); + + info!("Starting RocksDb backup service..."); + + let guard = if args.is_run_profiling { + Some(pprof::ProfilerGuardBuilder::default().frequency(100).build().unwrap()) + } else { + None + }; + + let mut registry = Registry::default(); + let metrics = Arc::new(RocksDbMetricsConfig::new()); + metrics.register(&mut registry); + let red_metrics = Arc::new(metrics_utils::red::RequestErrorDurationMetrics::new()); + red_metrics.register(&mut registry); + + let tasks = JoinSet::new(); + let mutexed_tasks = Arc::new(Mutex::new(tasks)); + + let storage = Storage::open_secondary( + &args.rocks_db_path_container, + &args.rocks_db_secondary_path, + mutexed_tasks.clone(), + red_metrics.clone(), + MigrationState::Last, + ) + .unwrap(); + + debug!( + rocks_db_path_container = %args.rocks_db_path_container, + rocks_db_secondary_path = %args.rocks_db_secondary_path, + "Opened RocksDb in secondary mode" + ); + + let rocks_storage = Arc::new(storage); + let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + let shutdown_token = CancellationToken::new(); + + info!("Starting store DB backup..."); + let mut backup_service = RocksDbBackupService::new( + rocks_storage.db.clone(), + &RocksDbBackupServiceConfig { + rocks_backup_dir: args.backup_dir, + rocks_backup_archives_dir: args.backup_archives_dir, + rocks_flush_before_backup: args.flush_before_backup, + rocks_interval_in_seconds: args.interval_in_seconds, + }, + )?; + let cloned_rx = shutdown_rx.resubscribe(); + mutexed_tasks + .lock() + .await + .spawn(async move { backup_service.perform_backup(metrics.clone(), cloned_rx).await }); + + start_metrics(registry, args.metrics_port).await; + // --stop + graceful_stop( + mutexed_tasks.clone(), + shutdown_tx, + Some(shutdown_token), + guard, + args.profiling_file_path_container, + &args.heap_path, + ) + .await; + + Ok(()) +} diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 5fa2cb32..91fca6b0 100644 --- a/nft_ingester/src/config.rs +++ 
b/nft_ingester/src/config.rs @@ -320,6 +320,48 @@ pub struct SynchronizerClapArgs { pub log_level: String, } +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +pub struct RocksDbBackupServiceClapArgs { + #[clap(long, env, default_value = "./my_rocksdb", help = "Rocks db path container")] + pub rocks_db_path_container: String, + #[clap(long, env, default_value = "./my_rocksdb_secondary", help = "Rocks db secondary path")] + pub rocks_db_secondary_path: String, + #[clap(long, env = "ROCKS_BACKUP_ARCHIVES_DIR", help = "Rocks backup archives dir")] + pub backup_archives_dir: String, + #[clap(long, env = "ROCKS_BACKUP_DIR", help = "Rocks backup dir")] + pub backup_dir: String, + #[clap( + long, + env = "ROCKS_FLUSH_BEFORE_BACKUP", + help = "Whether to flush RocksDb before backup" + )] + pub flush_before_backup: bool, + #[clap(long, env = "ROCKS_INTERVAL_IN_SECONDS", help = "Backup interval (seconds)")] + pub interval_in_seconds: i64, + + #[clap( + long("run_profiling"), + env = "IS_RUN_PROFILING", + default_value_t = false, + help = "Start profiling (default: false)" + )] + pub is_run_profiling: bool, + #[clap(long, env, default_value = "/usr/src/app/heaps", help = "Heap path")] + pub heap_path: String, + + #[clap( + long, + env = "ROCKS_DB_BACKUP_SERVICE_METRICS_PORT", + help = "Metrics port. Start HTTP server to report metrics if port exist." + )] + pub metrics_port: Option<u16>, + pub profiling_file_path_container: Option<String>, + + #[clap(long, env, default_value = "info", help = "warn|info|debug")] + pub log_level: String, +} + #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] pub struct MigratorClapArgs { diff --git a/nft_ingester/src/error/mod.rs b/nft_ingester/src/error/mod.rs index 83110a20..11b4f4dc 100644 --- a/nft_ingester/src/error/mod.rs +++ b/nft_ingester/src/error/mod.rs @@ -7,7 +7,7 @@ use interface::error::UsecaseError; use plerkle_messenger::MessengerError; use plerkle_serialization::error::PlerkleSerializationError; use postgre_client::error::IndexDbError; -use rocks_db::errors::{BackupServiceError, StorageError}; +use rocks_db::errors::{RocksDbBackupServiceError, StorageError}; use solana_sdk::{pubkey::ParsePubkeyError, signature::ParseSignatureError}; use solana_transaction_status::EncodeError; use thiserror::Error; @@ -212,8 +212,8 @@ impl From for IngesterError { } } -impl From<BackupServiceError> for IngesterError { - fn from(value: BackupServiceError) -> Self { +impl From<RocksDbBackupServiceError> for IngesterError { + fn from(value: RocksDbBackupServiceError) -> Self { IngesterError::BackupError(value.to_string()) } } diff --git a/nft_ingester/src/rocks_db.rs b/nft_ingester/src/rocks_db.rs index e4384d06..ff0ca3d0 100644 --- a/nft_ingester/src/rocks_db.rs +++ b/nft_ingester/src/rocks_db.rs @@ -7,10 +7,8 @@ use std::{ time::Duration, }; -use metrics_utils::IngesterMetricsConfig; use rocks_db::{ - backup_service, backup_service::BackupService, errors::BackupServiceError, - storage_traits::AssetSlotStorage, Storage, + backup_service, errors::RocksDbBackupServiceError, storage_traits::AssetSlotStorage, Storage, }; use tokio::{ sync::broadcast::{Receiver, Sender}, @@ -21,15 +19,6 @@ use tracing::{error, info}; use crate::config::INGESTER_BACKUP_NAME; -pub async fn perform_backup( - mut backup_service: BackupService, - cloned_rx: Receiver<()>, - cloned_metrics: Arc<IngesterMetricsConfig>, -) -> Result<(), JoinError> { - backup_service.perform_backup(cloned_metrics, cloned_rx).await; - Ok(()) -} - pub async fn receive_last_saved_slot( cloned_rx: Receiver<()>, cloned_tx: Sender<()>, @@ -61,7
+50,7 @@ pub async fn restore_rocksdb( rocks_backup_url: &str, rocks_backup_archives_dir: &str, rocks_db_path_container: &str, -) -> Result<(), BackupServiceError> { +) -> Result<(), RocksDbBackupServiceError> { create_dir_all(rocks_backup_archives_dir)?; let backup_path = format!("{}/{}", rocks_backup_archives_dir, INGESTER_BACKUP_NAME); diff --git a/rocks-db/src/backup_service.rs b/rocks-db/src/backup_service.rs index 10e07ed9..a439dca0 100644 --- a/rocks-db/src/backup_service.rs +++ b/rocks-db/src/backup_service.rs @@ -8,16 +8,16 @@ use std::{ }; use futures_util::StreamExt; -use metrics_utils::IngesterMetricsConfig; +use metrics_utils::RocksDbMetricsConfig; use rocksdb::{ backup::{BackupEngine, BackupEngineOptions, RestoreOptions}, Env, DB, }; use serde::{Deserialize, Serialize}; -use tokio::sync::broadcast::Receiver; +use tokio::{sync::broadcast::Receiver, task::JoinError}; use tracing::{error, info}; -use crate::errors::BackupServiceError; +use crate::errors::RocksDbBackupServiceError; const BACKUP_PREFIX: &str = "backup-rocksdb"; const BACKUP_POSTFIX: &str = ".tar.lz4"; @@ -26,33 +26,26 @@ const NUMBER_ARCHIVES_TO_STORE: usize = 2; const DEFAULT_BACKUP_DIR_NAME: &str = "_rocksdb_backup"; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub struct BackupServiceConfig { +pub struct RocksDbBackupServiceConfig { pub rocks_backup_dir: String, pub rocks_backup_archives_dir: String, pub rocks_flush_before_backup: bool, pub rocks_interval_in_seconds: i64, } -pub fn load_config() -> Result<BackupServiceConfig, BackupServiceError> { - figment::Figment::new() - .join(figment::providers::Env::prefixed("INGESTER_")) - .extract() - .map_err(|config_error| BackupServiceError::ConfigurationError(config_error.to_string())) -} - -pub struct BackupService { +pub struct RocksDbBackupService { pub backup_engine: BackupEngine, - pub backup_config: BackupServiceConfig, + pub backup_config: RocksDbBackupServiceConfig, pub db: Arc<DB>, } -unsafe impl Send for BackupService {} +unsafe impl Send for RocksDbBackupService {} -impl BackupService { +impl RocksDbBackupService { pub fn new( db: Arc<DB>, - config: &BackupServiceConfig, - ) -> Result<Self, BackupServiceError> { + config: &RocksDbBackupServiceConfig, + ) -> Result<Self, RocksDbBackupServiceError> { let env = Env::new()?; let backup_options = BackupEngineOptions::new(config.rocks_backup_dir.clone())?; let backup_engine = BackupEngine::open(&backup_options, &env)?; @@ -60,7 +53,7 @@ impl BackupService { Ok(Self { backup_engine, backup_config: config.clone(), db }) } - fn create_backup(&mut self, backup_id: u32) -> Result<(), BackupServiceError> { + fn create_backup(&mut self, backup_id: u32) -> Result<(), RocksDbBackupServiceError> { self.backup_engine.create_new_backup_flush( self.db.as_ref(), self.backup_config.rocks_flush_before_backup, @@ -71,9 +64,9 @@ impl BackupService { pub async fn perform_backup( &mut self, - metrics: Arc<IngesterMetricsConfig>, + metrics: Arc<RocksDbMetricsConfig>, mut rx: Receiver<()>, - ) { + ) -> Result<(), JoinError> { let mut last_backup_id = 1; while rx.is_empty() { let start_time = chrono::Utc::now(); @@ -115,9 +108,11 @@ impl BackupService { } }; } + + Ok(()) } - pub fn build_backup_archive(&self, backup_time: i64) -> Result<(), BackupServiceError> { + pub fn build_backup_archive(&self, backup_time: i64) -> Result<(), RocksDbBackupServiceError> { let file_path = format!( "{}/{}-{}{}", self.backup_config.rocks_backup_archives_dir, @@ -140,29 +135,33 @@ impl BackupService { Ok(()) } - pub fn verify_backup_all(&self) -> Result<(), BackupServiceError> { + pub fn verify_backup_all(&self) -> Result<(), RocksDbBackupServiceError> { let backup_infos = 
self.backup_engine.get_backup_info(); if backup_infos.is_empty() { - return Err(BackupServiceError::BackupEngineInfoIsEmpty {}); + return Err(RocksDbBackupServiceError::BackupEngineInfoIsEmpty {}); } for backup_info in backup_infos.iter() { self.verify_backup_single(backup_info.backup_id)?; if backup_info.size == 0 { - return Err(BackupServiceError::BackupEngineInfoSizeIsZero(backup_info.backup_id)); + return Err(RocksDbBackupServiceError::BackupEngineInfoSizeIsZero( + backup_info.backup_id, + )); } } Ok(()) } - pub fn verify_backup_single(&self, backup_id: u32) -> Result<(), BackupServiceError> { + pub fn verify_backup_single(&self, backup_id: u32) -> Result<(), RocksDbBackupServiceError> { match self.backup_engine.verify_backup(backup_id) { Ok(_) => Ok(()), - Err(err) => Err(BackupServiceError::BackupEngineInfo(backup_id, err.to_string())), + Err(err) => { + Err(RocksDbBackupServiceError::BackupEngineInfo(backup_id, err.to_string())) + }, } } - pub fn delete_old_archives(&self) -> Result<(), BackupServiceError> { + pub fn delete_old_archives(&self) -> Result<(), RocksDbBackupServiceError> { let mut entries: Vec<_> = std::fs::read_dir(self.backup_config.rocks_backup_archives_dir.clone())? .filter_map(|r| r.ok()) @@ -181,7 +180,7 @@ impl BackupService { Ok(()) } - pub fn delete_old_backups(&mut self) -> Result<(), BackupServiceError> { + pub fn delete_old_backups(&mut self) -> Result<(), RocksDbBackupServiceError> { if self.backup_engine.get_backup_info().capacity() > ROCKS_NUM_BACKUPS_TO_KEEP { self.backup_engine.purge_old_backups(ROCKS_NUM_BACKUPS_TO_KEEP)?; } @@ -205,7 +204,7 @@ pub fn get_backup_dir_name(backup_path: &str) -> String { pub async fn download_backup_archive( url: &str, backup_path: &str, -) -> Result<(), BackupServiceError> { +) -> Result<(), RocksDbBackupServiceError> { let resp = reqwest::get(url).await?; if resp.status().is_success() { let mut file = File::create(backup_path)?; @@ -216,10 +215,10 @@ pub async fn download_backup_archive( return Ok(()); } - Err(BackupServiceError::ReqwestError(resp.status().to_string())) + Err(RocksDbBackupServiceError::ReqwestError(resp.status().to_string())) } -pub fn unpack_backup_archive(file_path: &str, dst: &str) -> Result<(), BackupServiceError> { +pub fn unpack_backup_archive(file_path: &str, dst: &str) -> Result<(), RocksDbBackupServiceError> { let file = File::open(file_path)?; let decoder = lz4::Decoder::new(BufReader::new(file))?; let mut archive = tar::Archive::new(decoder); @@ -231,7 +230,7 @@ pub fn unpack_backup_archive(file_path: &str, dst: &str) -> Result<(), BackupSer pub fn restore_external_backup( backup_dir: &str, new_db_dir: &str, -) -> Result<(), BackupServiceError> { +) -> Result<(), RocksDbBackupServiceError> { let env = Env::new()?; let backup_options = BackupEngineOptions::new(backup_dir)?; let mut backup_engine = BackupEngine::open(&backup_options, &env)?; diff --git a/rocks-db/src/errors.rs b/rocks-db/src/errors.rs index 1dd46dc5..4b98ca26 100644 --- a/rocks-db/src/errors.rs +++ b/rocks-db/src/errors.rs @@ -4,7 +4,7 @@ use reqwest; use thiserror::Error; #[derive(Error, Debug, PartialEq, Eq)] -pub enum BackupServiceError { +pub enum RocksDbBackupServiceError { #[error("Backup engine info is empty")] BackupEngineInfoIsEmpty {}, #[error("Backup error for {0}: {1}")] @@ -21,21 +21,21 @@ pub enum BackupServiceError { ReqwestError(String), } -impl From<rocksdb::Error> for BackupServiceError { +impl From<rocksdb::Error> for RocksDbBackupServiceError { fn from(err: rocksdb::Error) -> Self { - BackupServiceError::DatabaseError(err.to_string()) + 
RocksDbBackupServiceError::DatabaseError(err.to_string()) } } -impl From<Error> for BackupServiceError { +impl From<Error> for RocksDbBackupServiceError { fn from(value: Error) -> Self { - BackupServiceError::StdError(value.to_string()) + RocksDbBackupServiceError::StdError(value.to_string()) } } -impl From<reqwest::Error> for BackupServiceError { +impl From<reqwest::Error> for RocksDbBackupServiceError { fn from(value: reqwest::Error) -> Self { - BackupServiceError::ReqwestError(value.to_string()) + RocksDbBackupServiceError::ReqwestError(value.to_string()) } } From 74e4c48a04ec2305ff337fa79c16635e433f10ed Mon Sep 17 00:00:00 2001 From: armyhaylenko Date: Fri, 17 Jan 2025 11:41:48 +0200 Subject: [PATCH 11/33] chore: add `start-rocksdb-backup` command to Makefile --- Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Makefile b/Makefile index 3f2fb51a..5626aa83 100644 --- a/Makefile +++ b/Makefile @@ -16,6 +16,9 @@ start-synchronizer: start-api: @docker compose -f docker-compose.yaml up -d das-api +start-rocksdb-backup: + @docker compose -f docker-compose.yaml up -d rocksdb-backup + stop-api: @docker stop --time 20 das-api From 084b2a9d20110ae4f08c55bbb03582b90f545786 Mon Sep 17 00:00:00 2001 From: armyhaylenko Date: Fri, 17 Jan 2025 17:04:20 +0200 Subject: [PATCH 12/33] chore(ci): mount primary rocksdb path as readonly to backup container --- docker-compose.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index ca544ad5..cb91f7e6 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -170,7 +170,7 @@ services: - .env network_mode: host volumes: - - ${ROCKS_DB_PATH}:${ROCKS_DB_PATH_CONTAINER}:rw + - ${ROCKS_DB_PATH}:${ROCKS_DB_PATH_CONTAINER}:ro - ${ROCKS_BACKUP_DIR}:${ROCKS_BACKUP_DIR}:rw - ${ROCKS_BACKUP_ARCHIVES_DIR}:${ROCKS_BACKUP_ARCHIVES_DIR}:rw - ${PROFILING_FILE_PATH}:${PROFILING_FILE_PATH_CONTAINER}:rw From 4d01d0c4d882b44ef136b2671cc930e61a720938 Mon Sep 17 00:00:00 2001 From: armyhaylenko Date: Thu, 23 Jan 2025 16:42:27 +0200 Subject: [PATCH 13/33] feat: make the backup service a one-time job --- docker-compose.yaml | 4 +- ingester.Dockerfile | 3 +- metrics_utils/src/lib.rs | 41 ---------- nft_ingester/src/bin/rocksdb_backup/main.rs | 44 +--------- nft_ingester/src/config.rs | 17 ---- rocks-db/src/backup_service.rs | 91 ++++++++++----------- 6 files changed, 47 insertions(+), 153 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index cb91f7e6..a21eff8e 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -164,8 +164,7 @@ services: rocksdb-backup: container_name: rocksdb-backup - restart: always - entrypoint: sh -c "if [ -z '$$MALLOC_CONF' ]; then exec ./rocksdb_backup; else exec ./profiling_rocksdb_backup; fi" + entrypoint: ./rocksdb_backup env_file: - .env network_mode: host @@ -173,7 +172,6 @@ services: - ${ROCKS_DB_PATH}:${ROCKS_DB_PATH_CONTAINER}:ro - ${ROCKS_BACKUP_DIR}:${ROCKS_BACKUP_DIR}:rw - ${ROCKS_BACKUP_ARCHIVES_DIR}:${ROCKS_BACKUP_ARCHIVES_DIR}:rw - - ${PROFILING_FILE_PATH}:${PROFILING_FILE_PATH_CONTAINER}:rw - ${ROCKS_DB_SECONDARY_PATH}:${ROCKS_DB_SECONDARY_PATH_CONTAINER}:rw - ./heaps:/usr/src/app/heaps:rw build: diff --git a/ingester.Dockerfile b/ingester.Dockerfile index 94d91715..f1d10948 100644 --- a/ingester.Dockerfile +++ b/ingester.Dockerfile @@ -43,7 +43,7 @@ RUN cargo build --release --bin ingester --bin api --bin backfill --bin synchron # Building the profiling feature services FROM cacher AS builder-with-profiling COPY . . 
-RUN cargo build --release --features profiling --bin ingester --bin api --bin backfill --bin synchronizer --bin slot_persister --bin rocksdb_backup +RUN cargo build --release --features profiling --bin ingester --bin api --bin backfill --bin synchronizer --bin slot_persister # Final image FROM rust:1.84-slim-bullseye AS runtime @@ -64,7 +64,6 @@ COPY --from=builder-with-profiling /rust/target/release/backfill ${APP}/profilin COPY --from=builder-with-profiling /rust/target/release/api ${APP}/profiling_api COPY --from=builder-with-profiling /rust/target/release/synchronizer ${APP}/profiling_synchronizer COPY --from=builder-with-profiling /rust/target/release/slot_persister ${APP}/profiling_slot_persister -COPY --from=builder-with-profiling /rust/target/release/rocksdb_backup ${APP}/profiling_rocksdb_backup WORKDIR ${APP} STOPSIGNAL SIGINT diff --git a/metrics_utils/src/lib.rs b/metrics_utils/src/lib.rs index ad75488a..4a177b4b 100644 --- a/metrics_utils/src/lib.rs +++ b/metrics_utils/src/lib.rs @@ -1267,44 +1267,3 @@ impl BatchMintPersisterMetricsConfig { ); } } - -#[derive(Debug, Clone)] -pub struct RocksDbMetricsConfig { - start_time: Gauge, - rocksdb_backup_latency: Histogram, -} - -impl RocksDbMetricsConfig { - pub fn new() -> Self { - Self { - start_time: Default::default(), - rocksdb_backup_latency: Histogram::new( - [60.0, 300.0, 600.0, 1200.0, 1800.0, 3600.0, 5400.0, 7200.0, 9000.0, 10800.0] - .into_iter(), - ), - } - } - - pub fn start_time(&self) -> i64 { - self.start_time.set(Utc::now().timestamp()) - } - - pub fn set_rocksdb_backup_latency(&self, duration: f64) { - self.rocksdb_backup_latency.observe(duration); - } - - pub fn register(&self, registry: &mut Registry) { - self.start_time(); - registry.register( - "ingester_rocksdb_backup_latency", - "Histogram of rocksdb backup duration", - self.rocksdb_backup_latency.clone(), - ); - } -} - -impl Default for RocksDbMetricsConfig { - fn default() -> Self { - Self::new() - } -} diff --git a/nft_ingester/src/bin/rocksdb_backup/main.rs b/nft_ingester/src/bin/rocksdb_backup/main.rs index 87d99c01..45d43f3b 100644 --- a/nft_ingester/src/bin/rocksdb_backup/main.rs +++ b/nft_ingester/src/bin/rocksdb_backup/main.rs @@ -1,11 +1,7 @@ use std::sync::Arc; use clap::Parser; -use metrics_utils::{utils::start_metrics, RocksDbMetricsConfig}; -use nft_ingester::{ - config::{init_logger, RocksDbBackupServiceClapArgs}, - init::graceful_stop, -}; +use nft_ingester::config::{init_logger, RocksDbBackupServiceClapArgs}; use prometheus_client::registry::Registry; use rocks_db::{ backup_service::{RocksDbBackupService, RocksDbBackupServiceConfig}, @@ -13,17 +9,9 @@ use rocks_db::{ migrator::MigrationState, Storage, }; -use tokio::{ - sync::{broadcast, Mutex}, - task::JoinSet, -}; -use tokio_util::sync::CancellationToken; +use tokio::{sync::Mutex, task::JoinSet}; use tracing::{debug, info}; -#[cfg(feature = "profiling")] -#[global_allocator] -static GLOBAL: jemallocator::Jemalloc = jemallocator::Jemalloc; - #[tokio::main(flavor = "multi_thread")] async fn main() -> Result<(), RocksDbBackupServiceError> { let args = RocksDbBackupServiceClapArgs::parse(); @@ -31,15 +19,7 @@ async fn main() -> Result<(), RocksDbBackupServiceError> { info!("Starting RocksDb backup service..."); - let guard = if args.is_run_profiling { - Some(pprof::ProfilerGuardBuilder::default().frequency(100).build().unwrap()) - } else { - None - }; - let mut registry = Registry::default(); - let metrics = Arc::new(RocksDbMetricsConfig::new()); - metrics.register(&mut registry); let 
red_metrics = Arc::new(metrics_utils::red::RequestErrorDurationMetrics::new()); red_metrics.register(&mut registry); @@ -62,8 +42,6 @@ async fn main() -> Result<(), RocksDbBackupServiceError> { ); let rocks_storage = Arc::new(storage); - let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); - let shutdown_token = CancellationToken::new(); info!("Starting store DB backup..."); let mut backup_service = RocksDbBackupService::new( @@ -72,26 +50,10 @@ async fn main() -> Result<(), RocksDbBackupServiceError> { rocks_backup_dir: args.backup_dir, rocks_backup_archives_dir: args.backup_archives_dir, rocks_flush_before_backup: args.flush_before_backup, - rocks_interval_in_seconds: args.interval_in_seconds, }, )?; - let cloned_rx = shutdown_rx.resubscribe(); - mutexed_tasks - .lock() - .await - .spawn(async move { backup_service.perform_backup(metrics.clone(), cloned_rx).await }); - start_metrics(registry, args.metrics_port).await; - // --stop - graceful_stop( - mutexed_tasks.clone(), - shutdown_tx, - Some(shutdown_token), - guard, - args.profiling_file_path_container, - &args.heap_path, - ) - .await; + mutexed_tasks.lock().await.spawn(async move { backup_service.perform_backup().await }); Ok(()) } diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 91fca6b0..24d6bdfb 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -337,27 +337,10 @@ pub struct RocksDbBackupServiceClapArgs { help = "Whether to flush RocksDb before backup" )] pub flush_before_backup: bool, - #[clap(long, env = "ROCKS_INTERVAL_IN_SECONDS", help = "Backup interval (seconds)")] - pub interval_in_seconds: i64, - #[clap( - long("run_profiling"), - env = "IS_RUN_PROFILING", - default_value_t = false, - help = "Start profiling (default: false)" - )] - pub is_run_profiling: bool, #[clap(long, env, default_value = "/usr/src/app/heaps", help = "Heap path")] pub heap_path: String, - #[clap( - long, - env = "ROCKS_DB_BACKUP_SERVICE_METRICS_PORT", - help = "Metrics port. Start HTTP server to report metrics if port exist." 
- )] - pub metrics_port: Option<u16>, - pub profiling_file_path_container: Option<String>, #[clap(long, env, default_value = "info", help = "warn|info|debug")] pub log_level: String, } diff --git a/rocks-db/src/backup_service.rs b/rocks-db/src/backup_service.rs index a439dca0..88d9e881 100644 --- a/rocks-db/src/backup_service.rs +++ b/rocks-db/src/backup_service.rs @@ -4,17 +4,16 @@ use std::{ io::{BufReader, Write}, path::Path, sync::Arc, - time::Duration, }; use futures_util::StreamExt; -use metrics_utils::RocksDbMetricsConfig; +use indicatif::{ProgressBar, ProgressStyle}; use rocksdb::{ backup::{BackupEngine, BackupEngineOptions, RestoreOptions}, Env, DB, }; use serde::{Deserialize, Serialize}; -use tokio::{sync::broadcast::Receiver, task::JoinError}; +use tokio::task::JoinError; use tracing::{error, info}; use crate::errors::RocksDbBackupServiceError; @@ -24,13 +23,13 @@ const BACKUP_POSTFIX: &str = ".tar.lz4"; const ROCKS_NUM_BACKUPS_TO_KEEP: usize = 1; const NUMBER_ARCHIVES_TO_STORE: usize = 2; const DEFAULT_BACKUP_DIR_NAME: &str = "_rocksdb_backup"; +const BASE_BACKUP_ID: u32 = 1; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RocksDbBackupServiceConfig { pub rocks_backup_dir: String, pub rocks_backup_archives_dir: String, pub rocks_flush_before_backup: bool, - pub rocks_interval_in_seconds: i64, } pub struct RocksDbBackupService { @@ -62,52 +61,47 @@ impl RocksDbBackupService { self.verify_backup_single(backup_id) } - pub async fn perform_backup( - &mut self, - metrics: Arc<RocksDbMetricsConfig>, - mut rx: Receiver<()>, - ) -> Result<(), JoinError> { - let mut last_backup_id = 1; - while rx.is_empty() { - let start_time = chrono::Utc::now(); - last_backup_id = match self.backup_engine.get_backup_info().last() { - None => last_backup_id, - Some(backup_info) => { - if (backup_info.timestamp + self.backup_config.rocks_interval_in_seconds) - >= start_time.timestamp() - { - continue; - } - backup_info.backup_id + 1 - }, - }; - - if let Err(err) = self.create_backup(last_backup_id) { - error!("create_backup: {}", err); - } - if let Err(err) = self.delete_old_backups() { - error!("delete_old_backups: {}", err); - } - if let Err(err) = self.build_backup_archive(start_time.timestamp()) { - error!("build_backup_archive: {}", err); - } - if let Err(err) = self.delete_old_archives() { - error!("delete_old_archives: {}", err); - } - - let duration = chrono::Utc::now().signed_duration_since(start_time); - metrics.set_rocksdb_backup_latency(duration.num_milliseconds() as f64); + pub async fn perform_backup(&mut self) -> Result<(), JoinError> { + let start_time = chrono::Utc::now(); + let last_backup_id = match self.backup_engine.get_backup_info().last() { + None => BASE_BACKUP_ID, + Some(backup_info) => backup_info.backup_id + 1, + }; + + let progress_bar = Arc::new(ProgressBar::new(4)); // four steps: + // create backup, delete the old one, build archive, delete old archives + progress_bar.set_style( + ProgressStyle::default_bar() + .template( + "[{bar:40.cyan/blue}] {percent}% \ + ({pos}/{len}) {msg}", + ) + .expect("Failed to set progress bar style") + .progress_chars("#>-"), + ); - info!("perform_backup {}", duration.num_seconds()); + if let Err(err) = self.create_backup(last_backup_id) { + error!(error = %err, "create_backup: {:?}", err); + } - tokio::select! 
{ - _ = tokio::time::sleep(Duration::from_secs(self.backup_config.rocks_interval_in_seconds as u64)) => {}, - _ = rx.recv() => { - info!("Received stop signal, stopping performing backup"); - break; - } - }; + progress_bar.inc(1); + if let Err(err) = self.delete_old_backups() { + error!(error = %err, "delete_old_backups: {:?}", err); + } + progress_bar.inc(1); + if let Err(err) = self.build_backup_archive(start_time.timestamp()) { + error!(error = %err, "build_backup_archive: {:?}", err); + } + progress_bar.inc(1); + if let Err(err) = self.delete_old_archives() { + error!(error = %err, "delete_old_archives: {:?}", err); } + progress_bar.inc(1); + progress_bar.finish_with_message("Backup completed!"); + + let duration = chrono::Utc::now().signed_duration_since(start_time); + + info!(duration = %duration.num_milliseconds(), "Performed backup in {}ms", duration.num_milliseconds()); Ok(()) } @@ -129,8 +123,7 @@ impl RocksDbBackupService { let backup_dir_name = get_backup_dir_name(self.backup_config.rocks_backup_dir.as_str()); tar.append_dir_all(backup_dir_name, self.backup_config.rocks_backup_dir.clone())?; tar.into_inner()?; - let (_output, result) = enc.finish(); - result?; + enc.finish().1?; Ok(()) } From 221938ba5dfb0b6f09da4b56ca33b8ec138151d7 Mon Sep 17 00:00:00 2001 From: armyhaylenko Date: Thu, 23 Jan 2025 16:43:16 +0200 Subject: [PATCH 14/33] feat: use `PathBuf`s in backup-related functions --- docker-compose.yaml | 3 +- nft_ingester/src/bin/ingester/main.rs | 12 +++-- nft_ingester/src/bin/rocksdb_backup/main.rs | 6 +-- nft_ingester/src/config.rs | 14 +++--- nft_ingester/src/rocks_db.rs | 13 +++--- rocks-db/src/backup_service.rs | 50 ++++++++++++--------- 6 files changed, 56 insertions(+), 42 deletions(-) diff --git a/docker-compose.yaml b/docker-compose.yaml index a21eff8e..9402f86a 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -172,8 +172,7 @@ services: - ${ROCKS_DB_PATH}:${ROCKS_DB_PATH_CONTAINER}:ro - ${ROCKS_BACKUP_DIR}:${ROCKS_BACKUP_DIR}:rw - ${ROCKS_BACKUP_ARCHIVES_DIR}:${ROCKS_BACKUP_ARCHIVES_DIR}:rw - - ${ROCKS_DB_SECONDARY_PATH}:${ROCKS_DB_SECONDARY_PATH_CONTAINER}:rw - - ./heaps:/usr/src/app/heaps:rw + - ${ROCKS_DB_SECONDARY_PATH}/backup:${ROCKS_DB_SECONDARY_PATH_CONTAINER}:rw build: context: . 
dockerfile: ingester.Dockerfile diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index c81e6e61..ae829a7b 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -115,10 +115,14 @@ pub async fn main() -> Result<(), IngesterError> { &args .rocks_backup_url .expect("rocks_backup_url is required for the restore rocks db process"), - &args - .rocks_backup_archives_dir - .expect("rocks_backup_archives_dir is required for the restore rocks db process"), - &args.rocks_db_path_container, + &PathBuf::from_str( + &args.rocks_backup_archives_dir.expect( + "rocks_backup_archives_dir is required for the restore rocks db process", + ), + ) + .expect("invalid rocks backup archives dir"), + &PathBuf::from_str(&args.rocks_db_path_container) + .expect("invalid rocks backup archives dir"), ) .await?; } diff --git a/nft_ingester/src/bin/rocksdb_backup/main.rs b/nft_ingester/src/bin/rocksdb_backup/main.rs index 45d43f3b..15a6afe2 100644 --- a/nft_ingester/src/bin/rocksdb_backup/main.rs +++ b/nft_ingester/src/bin/rocksdb_backup/main.rs @@ -36,8 +36,8 @@ async fn main() -> Result<(), RocksDbBackupServiceError> { .unwrap(); debug!( - rocks_db_path_container = %args.rocks_db_path_container, - rocks_db_secondary_path = %args.rocks_db_secondary_path, + rocks_db_path_container = ?args.rocks_db_path_container, + rocks_db_secondary_path = ?args.rocks_db_secondary_path, "Opened RocksDb in secondary mode" ); @@ -53,7 +53,7 @@ async fn main() -> Result<(), RocksDbBackupServiceError> { }, )?; - mutexed_tasks.lock().await.spawn(async move { backup_service.perform_backup().await }); + backup_service.perform_backup().await?; Ok(()) } diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 24d6bdfb..02d22a1d 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -1,4 +1,7 @@ +use std::path::PathBuf; + use clap::{ArgAction, Parser, ValueEnum}; +// TODO: replace String paths with PathBuf use figment::value::Dict; use serde::Deserialize; use solana_sdk::commitment_config::CommitmentLevel; @@ -324,13 +327,13 @@ pub struct SynchronizerClapArgs { #[command(author, version, about, long_about = None)] pub struct RocksDbBackupServiceClapArgs { #[clap(long, env, default_value = "./my_rocksdb", help = "Rocks db path container")] - pub rocks_db_path_container: String, + pub rocks_db_path_container: PathBuf, #[clap(long, env, default_value = "./my_rocksdb_secondary", help = "Rocks db secondary path")] - pub rocks_db_secondary_path: String, + pub rocks_db_secondary_path: PathBuf, #[clap(long, env = "ROCKS_BACKUP_ARCHIVES_DIR", help = "Rocks backup archives dir")] - pub backup_archives_dir: String, + pub backup_archives_dir: PathBuf, #[clap(long, env = "ROCKS_BACKUP_DIR", help = "Rocks backup dir")] - pub backup_dir: String, + pub backup_dir: PathBuf, #[clap( long, env = "ROCKS_FLUSH_BEFORE_BACKUP", @@ -338,9 +341,6 @@ pub struct RocksDbBackupServiceClapArgs { )] pub flush_before_backup: bool, - #[clap(long, env, default_value = "/usr/src/app/heaps", help = "Heap path")] - pub heap_path: String, - #[clap(long, env, default_value = "info", help = "warn|info|debug")] pub log_level: String, } diff --git a/nft_ingester/src/rocks_db.rs b/nft_ingester/src/rocks_db.rs index ff0ca3d0..f3881ea2 100644 --- a/nft_ingester/src/rocks_db.rs +++ b/nft_ingester/src/rocks_db.rs @@ -1,5 +1,6 @@ use std::{ fs::{create_dir_all, remove_dir_all}, + path::PathBuf, sync::{ atomic::{AtomicU64, Ordering}, Arc, @@ -48,20 +49,20 @@ pub async fn 
receive_last_saved_slot( pub async fn restore_rocksdb( rocks_backup_url: &str, - rocks_backup_archives_dir: &str, - rocks_db_path_container: &str, + rocks_backup_archives_dir: &PathBuf, + rocks_db_path_container: &PathBuf, ) -> Result<(), RocksDbBackupServiceError> { create_dir_all(rocks_backup_archives_dir)?; - let backup_path = format!("{}/{}", rocks_backup_archives_dir, INGESTER_BACKUP_NAME); + let backup_path = rocks_backup_archives_dir.join(INGESTER_BACKUP_NAME); backup_service::download_backup_archive(rocks_backup_url, &backup_path).await?; backup_service::unpack_backup_archive(&backup_path, rocks_backup_archives_dir)?; - let unpacked_archive = format!( - "{}/{}", - &rocks_backup_archives_dir, + let unpacked_archive = rocks_backup_archives_dir.join( backup_service::get_backup_dir_name(rocks_backup_url) + .parse::<PathBuf>() + .expect("invalid backup dir name"), ); backup_service::restore_external_backup(&unpacked_archive, rocks_db_path_container)?; diff --git a/rocks-db/src/backup_service.rs b/rocks-db/src/backup_service.rs index 88d9e881..60d57dfa 100644 --- a/rocks-db/src/backup_service.rs +++ b/rocks-db/src/backup_service.rs @@ -2,7 +2,7 @@ use std::{ ffi::OsStr, fs::File, io::{BufReader, Write}, - path::Path, + path::{Path, PathBuf}, sync::Arc, }; @@ -13,7 +13,6 @@ use rocksdb::{ Env, DB, }; use serde::{Deserialize, Serialize}; -use tokio::task::JoinError; use tracing::{error, info}; use crate::errors::RocksDbBackupServiceError; @@ -27,8 +26,8 @@ const BASE_BACKUP_ID: u32 = 1; #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RocksDbBackupServiceConfig { - pub rocks_backup_dir: String, - pub rocks_backup_archives_dir: String, + pub rocks_backup_dir: PathBuf, + pub rocks_backup_archives_dir: PathBuf, pub rocks_flush_before_backup: bool, } @@ -61,7 +60,7 @@ impl RocksDbBackupService { self.verify_backup_single(backup_id) } - pub async fn perform_backup(&mut self) -> Result<(), JoinError> { + pub async fn perform_backup(&mut self) -> Result<(), RocksDbBackupServiceError> { let start_time = chrono::Utc::now(); let last_backup_id = match self.backup_engine.get_backup_info().last() { None => BASE_BACKUP_ID, @@ -80,22 +79,21 @@ impl RocksDbBackupService { .progress_chars("#>-"), ); - if let Err(err) = self.create_backup(last_backup_id) { + self.create_backup(last_backup_id).inspect_err(|err| { error!(error = %err, "create_backup: {:?}", err); - } - + })?; progress_bar.inc(1); - if let Err(err) = self.delete_old_backups() { + self.delete_old_backups().inspect_err(|err| { error!(error = %err, "delete_old_backups: {:?}", err); - } + })?; progress_bar.inc(1); - if let Err(err) = self.build_backup_archive(start_time.timestamp()) { + self.build_backup_archive(start_time.timestamp()).inspect_err(|err| { error!(error = %err, "build_backup_archive: {:?}", err); - } + })?; progress_bar.inc(1); - if let Err(err) = self.delete_old_archives() { + self.delete_old_archives().inspect_err(|err| { error!(error = %err, "delete_old_archives: {:?}", err); - } + })?; progress_bar.inc(1); progress_bar.finish_with_message("Backup completed!"); @@ -109,7 +107,10 @@ impl RocksDbBackupService { pub fn build_backup_archive(&self, backup_time: i64) -> Result<(), RocksDbBackupServiceError> { let file_path = format!( "{}/{}-{}{}", - self.backup_config.rocks_backup_archives_dir, + self.backup_config + .rocks_backup_archives_dir + .to_str() + .expect("Invalid backup archives dir path"), BACKUP_PREFIX, backup_time, BACKUP_POSTFIX @@ -120,7 +121,13 @@ impl RocksDbBackupService { let mut enc = 
lz4::EncoderBuilder::new().level(1).build(file)?; let mut tar = tar::Builder::new(&mut enc); - let backup_dir_name = get_backup_dir_name(self.backup_config.rocks_backup_dir.as_str()); + let backup_dir_name = get_backup_dir_name( + self.backup_config + .rocks_backup_dir + .as_path() + .to_str() + .expect("invalid rocks backup dir provided"), + ); tar.append_dir_all(backup_dir_name, self.backup_config.rocks_backup_dir.clone())?; tar.into_inner()?; enc.finish().1?; @@ -196,7 +203,7 @@ pub fn get_backup_dir_name(backup_path: &str) -> String { pub async fn download_backup_archive( url: &str, - backup_path: &str, + backup_path: &PathBuf, ) -> Result<(), RocksDbBackupServiceError> { let resp = reqwest::get(url).await?; if resp.status().is_success() { @@ -211,7 +218,10 @@ pub async fn download_backup_archive( Err(RocksDbBackupServiceError::ReqwestError(resp.status().to_string())) } -pub fn unpack_backup_archive(file_path: &str, dst: &str) -> Result<(), RocksDbBackupServiceError> { +pub fn unpack_backup_archive( + file_path: &PathBuf, + dst: &PathBuf, +) -> Result<(), RocksDbBackupServiceError> { let file = File::open(file_path)?; let decoder = lz4::Decoder::new(BufReader::new(file))?; let mut archive = tar::Archive::new(decoder); @@ -221,8 +231,8 @@ pub fn unpack_backup_archive(file_path: &str, dst: &str) -> Result<(), RocksDbBa } pub fn restore_external_backup( - backup_dir: &str, - new_db_dir: &str, + backup_dir: &PathBuf, + new_db_dir: &PathBuf, ) -> Result<(), RocksDbBackupServiceError> { let env = Env::new()?; let backup_options = BackupEngineOptions::new(backup_dir)?; From 811bbfc6317219245926d1c0eb539ba38fb53ce5 Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Thu, 23 Jan 2025 18:54:29 +0200 Subject: [PATCH 15/33] added two tests for asset index cleaner (#374) --- nft_ingester/tests/api_tests.rs | 175 ++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index 3e47a114..fa0a4851 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -72,6 +72,7 @@ mod tests { Storage, ToFlatbuffersConverter, }; use serde_json::{json, Value}; + use setup::rocks::RocksTestEnvironment; use solana_program::pubkey::Pubkey; use solana_sdk::signature::Signature; use spl_pod::{ @@ -80,6 +81,7 @@ mod tests { }; use spl_token_2022::extension::interest_bearing_mint::BasisPoints; use sqlx::QueryBuilder; + use tempfile::TempDir; use testcontainers::clients::Cli; use tokio::{sync::Mutex, task::JoinSet}; use usecase::proofs::MaybeProofChecker; @@ -3694,4 +3696,177 @@ mod tests { assert_eq!(idx_fungible_asset_iter.count(), 1); assert_eq!(idx_non_fungible_asset_iter.count(), 1); } + + #[tokio::test(flavor = "multi_thread")] + async fn test_idx_cleaner_does_not_erase_updates() { + let cnt = 1; + let slot = 0; + let cli = Cli::default(); + + let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, slot).await; + let nft_token_mint = generated_assets.pubkeys[0]; + let owner: Pubkey = generated_assets.owners[0].owner.value.unwrap(); + + let mut batch_storage = BatchSaveStorage::new( + env.rocks_env.storage.clone(), + 10, + Arc::new(IngesterMetricsConfig::new()), + ); + let token_accounts_processor = + TokenAccountsProcessor::new(Arc::new(IngesterMetricsConfig::new())); + let token_account_addr = Pubkey::new_unique(); + + // receive 10 updates for the asset + for i in 0..10 { + let token_account = TokenAccount { + pubkey: token_account_addr, + mint: nft_token_mint, + delegate: None, + 
+                owner,
+                extensions: None,
+                frozen: false,
+                delegated_amount: 0,
+                slot_updated: i,
+                amount: 100 + i,
+                write_version: i as u64,
+            };
+            token_accounts_processor
+                .transform_and_save_token_account(
+                    &mut batch_storage,
+                    token_account_addr,
+                    &token_account,
+                )
+                .unwrap();
+            batch_storage.flush().unwrap();
+        }
+
+        let idx_fungible_asset_iter = env.rocks_env.storage.fungible_assets_update_idx.iter_start();
+        let idx_non_fungible_asset_iter = env.rocks_env.storage.assets_update_idx.iter_start();
+        // the fungible index holds exactly the 10 token account updates; the token account
+        // is tracked in both indexes, so the non-fungible index holds those 10 updates plus
+        // the originally generated asset, i.e. 11 entries
+        assert_eq!(idx_fungible_asset_iter.count(), 10);
+        assert_eq!(idx_non_fungible_asset_iter.count(), 11);
+
+        // nothing has been synchronized yet, so the cleaner must not erase anything
+        for asset_type in ASSET_TYPES {
+            clean_syncronized_idxs(
+                env.pg_env.client.clone(),
+                env.rocks_env.storage.clone(),
+                asset_type,
+            )
+            .await
+            .unwrap();
+        }
+
+        // with no completed sync, the index counts must stay exactly the same
+        let idx_fungible_asset_iter = env.rocks_env.storage.fungible_assets_update_idx.iter_start();
+        let idx_non_fungible_asset_iter = env.rocks_env.storage.assets_update_idx.iter_start();
+        // same reasoning as above: 10 fungible entries, 10 + 1 non-fungible entries
+        assert_eq!(idx_fungible_asset_iter.count(), 10);
+        assert_eq!(idx_non_fungible_asset_iter.count(), 11);
+    }
+
+    #[tokio::test(flavor = "multi_thread")]
+    async fn test_idx_cleaner_erases_updates_partially() {
+        let rocks_env: RocksTestEnvironment = RocksTestEnvironment::new(&[]);
+        let number_of_assets = 1;
+        let slot = 0;
+        let generated_assets = rocks_env.generate_assets(number_of_assets, slot).await;
+        let temp_dir = TempDir::new().expect("Failed to create a temporary directory");
+        let temp_dir_path = temp_dir.path();
+
+        let cli: Cli = Cli::default();
+        let pg_env =
+            setup::pg::TestEnvironment::new_with_mount(&cli, temp_dir_path.to_str().unwrap()).await;
+        let syncronizer = Arc::new(nft_ingester::index_syncronizer::Synchronizer::new(
+            rocks_env.storage.clone(),
+            pg_env.client.clone(),
+            10,
+            temp_dir_path.to_str().unwrap().to_string(),
+            Arc::new(SynchronizerMetricsConfig::new()),
+            1,
+        ));
+
+        let nft_token_mint = generated_assets.pubkeys[0];
+        let owner: Pubkey = generated_assets.owners[0].owner.value.unwrap();
+        let mut batch_storage = BatchSaveStorage::new(
+            rocks_env.storage.clone(),
+            10,
+            Arc::new(IngesterMetricsConfig::new()),
+        );
+        let token_accounts_processor =
+            TokenAccountsProcessor::new(Arc::new(IngesterMetricsConfig::new()));
+        let token_account_addr = Pubkey::new_unique();
+
+        // receive 5 updates for the asset and update the indexes accordingly
+        for i in 0..5 {
+            let token_account = TokenAccount {
+                pubkey: token_account_addr,
+                mint: nft_token_mint,
+                delegate: None,
+                owner,
+                extensions: None,
+                frozen: false,
+                delegated_amount: 0,
+                slot_updated: i,
+                amount: 100 + i,
+                write_version: i as u64,
+            };
+            token_accounts_processor
+                .transform_and_save_token_account(
+                    &mut batch_storage,
+                    token_account_addr,
+                    &token_account,
+                )
+                .unwrap();
+            batch_storage.flush().unwrap();
+        }
+        let (_tx, rx) = tokio::sync::broadcast::channel::<()>(1);
+        for asset_type in ASSET_TYPES {
+            syncronizer.full_syncronize(&rx, asset_type).await.unwrap();
+        }
+
+        // receive 5 more updates for the same asset
+        for i in 0..5 {
+            let token_account = TokenAccount {
+                pubkey: token_account_addr,
+                mint: nft_token_mint,
+                delegate: None,
+                owner,
+                extensions: None,
+                frozen: false,
+                delegated_amount: 0,
+                slot_updated: i,
+                amount: 100 + i,
+                write_version: i as u64,
+            };
+            token_accounts_processor
+                .transform_and_save_token_account(
+                    &mut batch_storage,
+                    token_account_addr,
+                    &token_account,
+                )
+                .unwrap();
+            batch_storage.flush().unwrap();
+        }
+
+        // the full history of index updates is still stored at this point
+        let idx_fungible_asset_iter = rocks_env.storage.fungible_assets_update_idx.iter_start();
+        let idx_non_fungible_asset_iter = rocks_env.storage.assets_update_idx.iter_start();
+        assert_eq!(idx_fungible_asset_iter.count(), 10);
+        assert_eq!(idx_non_fungible_asset_iter.count(), 11);
+
+        for asset_type in ASSET_TYPES {
+            clean_syncronized_idxs(pg_env.client.clone(), rocks_env.storage.clone(), asset_type)
+                .await
+                .unwrap();
+        }
+
+        // after the cleanup only the entries written since the sync point remain,
+        // roughly half of the history
+        let idx_fungible_asset_iter = rocks_env.storage.fungible_assets_update_idx.iter_start();
+        let idx_non_fungible_asset_iter = rocks_env.storage.assets_update_idx.iter_start();
+        assert_eq!(idx_fungible_asset_iter.count(), 6);
+        assert_eq!(idx_non_fungible_asset_iter.count(), 6);
+    }
 }
From 72a9ad0d8d1f41173e10f64e8a3357c923fdd467 Mon Sep 17 00:00:00 2001
From: armyhaylenko
Date: Fri, 24 Jan 2025 15:07:49 +0200
Subject: [PATCH 16/33] feat(ingester): add metrics for `RedisReceiver`

---
 metrics_utils/src/lib.rs              | 98 +++++++++++++++++++++++++++
 nft_ingester/src/bin/ingester/main.rs | 10 ++-
 nft_ingester/src/config.rs            |  2 +-
 nft_ingester/src/message_parser.rs    | 20 +++---
 nft_ingester/src/redis_receiver.rs    | 20 ++++--
 5 files changed, 131 insertions(+), 19 deletions(-)

diff --git a/metrics_utils/src/lib.rs b/metrics_utils/src/lib.rs
index 4a177b4b..e7dac5f6 100644
--- a/metrics_utils/src/lib.rs
+++ b/metrics_utils/src/lib.rs
@@ -56,6 +56,7 @@ pub struct MetricState {
     pub fork_cleaner_metrics: Arc<ForkCleanerMetricsConfig>,
     pub batch_mint_processor_metrics: Arc<BatchMintProcessorMetricsConfig>,
     pub batch_mint_persisting_metrics: Arc<BatchMintPersisterMetricsConfig>,
+    pub redis_receiver_metrics: Arc<RedisReceiverMetricsConfig>,
     pub registry: Registry,
 }
 
@@ -83,6 +84,7 @@ impl MetricState {
             batch_mint_processor_metrics: Arc::new(BatchMintProcessorMetricsConfig::new()),
             batch_mint_persisting_metrics: Arc::new(BatchMintPersisterMetricsConfig::new()),
             red_metrics: Arc::new(RequestErrorDurationMetrics::new()),
+            redis_receiver_metrics: Arc::new(RedisReceiverMetricsConfig::new()),
             registry: Registry::default(),
         }
     }
@@ -558,6 +560,7 @@ impl MetricsTrait for MetricState {
         self.red_metrics.register(&mut self.registry);
         self.fork_cleaner_metrics.register(&mut self.registry);
         self.batch_mint_processor_metrics.register(&mut self.registry);
+        self.redis_receiver_metrics.register(&mut self.registry);
     }
 }
 
@@ -1267,3 +1270,98 @@ impl BatchMintPersisterMetricsConfig {
         );
     }
 }
+
+#[derive(Debug, Clone)]
+pub struct RedisReceiverMetricsConfig {
+    start_time: Gauge,
+    transactions_received: Counter,
+    accounts_received: Counter,
+    transactions_parsed: Counter,
+    accounts_parsed: Counter,
+    transaction_parse_errors: Counter,
+    account_parse_errors: Counter,
+}
+
+impl Default for RedisReceiverMetricsConfig {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl RedisReceiverMetricsConfig {
+    pub fn new() -> Self {
+        Self {
+            start_time: Default::default(),
+            transactions_received: Default::default(),
+            accounts_received: Default::default(),
+            transactions_parsed: Default::default(),
+            accounts_parsed: Default::default(),
+            transaction_parse_errors: Default::default(),
+            account_parse_errors: Default::default(),
+        }
+    }
+    pub fn start_time(&self) -> i64 {
+        self.start_time.set(Utc::now().timestamp())
+    }
+    pub fn 
inc_transactions_received_by(&self, count: u64) -> u64 { + self.transactions_received.inc_by(count) + } + pub fn inc_accounts_received_by(&self, count: u64) -> u64 { + self.accounts_received.inc_by(count) + } + pub fn inc_transactions_parsed_by(&self, count: u64) -> u64 { + self.transactions_parsed.inc_by(count) + } + pub fn inc_accounts_parsed_by(&self, count: u64) -> u64 { + self.accounts_parsed.inc_by(count) + } + pub fn inc_transaction_parse_errors_by(&self, count: u64) -> u64 { + self.transaction_parse_errors.inc_by(count) + } + pub fn inc_account_parse_errors_by(&self, count: u64) -> u64 { + self.account_parse_errors.inc_by(count) + } + pub fn register(&self, registry: &mut Registry) { + registry.register( + "redis_receiver_start_time", + "Redis receiver metrics start time", + self.start_time.clone(), + ); + + registry.register( + "redis_total_transactions_received", + "Total transactions received (by all workers)", + self.transactions_received.clone(), + ); + + registry.register( + "redis_total_accounts_received", + "Total accounts received (by all workers)", + self.accounts_received.clone(), + ); + + registry.register( + "redis_total_transactions_parsed", + "Total transactions parsed (by all workers)", + self.transactions_parsed.clone(), + ); + + registry.register( + "redis_total_accounts_parsed", + "Total accounts parsed (by all workers)", + self.accounts_parsed.clone(), + ); + + registry.register( + "redis_total_transaction_parse_errors", + "Total transaction parse errors", + self.transaction_parse_errors.clone(), + ); + + registry.register( + "redis_total_account_parse_errors", + "Total account parse errors (by all workers)", + self.account_parse_errors.clone(), + ); + } +} diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index ae829a7b..d9f5d059 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -192,8 +192,13 @@ pub async fn main() -> Result<(), IngesterError> { }, }; let redis_receiver = Arc::new( - RedisReceiver::new(personal_message_config, ConsumptionType::All, ack_channel.clone()) - .await?, + RedisReceiver::new( + personal_message_config, + ConsumptionType::All, + ack_channel.clone(), + metrics_state.redis_receiver_metrics.clone(), + ) + .await?, ); run_accounts_processor( @@ -236,6 +241,7 @@ pub async fn main() -> Result<(), IngesterError> { personal_message_config.clone(), ConsumptionType::All, ack_channel.clone(), + metrics_state.redis_receiver_metrics.clone(), ) .await?, ); diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 02d22a1d..9808f8a5 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -580,7 +580,7 @@ impl BigTableConfig { pub fn init_logger(log_level: &str) { let t = tracing_subscriber::fmt().with_env_filter(log_level); - t.event_format(fmt::format::json()).init(); + t.event_format(fmt::format::json().with_line_number(true).with_file(true)).init(); } #[cfg(test)] diff --git a/nft_ingester/src/message_parser.rs b/nft_ingester/src/message_parser.rs index 321d7df3..d465b398 100644 --- a/nft_ingester/src/message_parser.rs +++ b/nft_ingester/src/message_parser.rs @@ -1,4 +1,4 @@ -use std::{fmt::Debug, str::FromStr, sync::Arc}; +use std::{str::FromStr, sync::Arc}; use blockbuster::{ error::BlockbusterError, @@ -19,7 +19,7 @@ use entities::{ use flatbuffers::FlatBufferBuilder; use itertools::Itertools; use solana_program::{program_pack::Pack, pubkey::Pubkey}; -use tracing::log::{debug, warn}; +use tracing::{debug, warn}; use 
utils::flatbuffer::account_data_generated::account_data::root_as_account_data;
 
 use crate::{
@@ -150,7 +150,7 @@ impl MessageParser {
                 };
             },
             Err(e) => {
-                account_parsing_error(e, account_info);
+                warn!(error = %e, pubkey = %account_info.pubkey, "Error while parsing account: {:?} {:?}", e, account_info);
             },
         }
 
@@ -249,7 +249,7 @@ impl MessageParser {
                 };
             },
             Err(e) => {
-                account_parsing_error(e, account_update);
+                warn!(error = %e, pubkey = %account_update.pubkey, "Error while parsing account: {:?} {:?}", e, account_update);
             },
         }
 
@@ -349,7 +349,9 @@ impl MessageParser {
             Err(e) => match e {
                 BlockbusterError::AccountTypeNotImplemented
                 | BlockbusterError::UninitializedAccount => {},
-                _ => account_parsing_error(e, account_info),
+                _ => {
+                    warn!(error = %e, pubkey = %account_info.pubkey, "Error while parsing account: {:?} {:?}", e, account_info)
+                },
             },
         }
 
@@ -372,7 +374,7 @@ impl MessageParser {
                 };
             },
             Err(e) => {
-                account_parsing_error(e, account_info);
+                warn!(error = %e, pubkey = %account_info.pubkey, "Error while parsing account: {:?} {:?}", e, account_info)
             },
         }
 
@@ -409,7 +411,7 @@ impl MessageParser {
                 ParsedInscription::UnhandledAccount => {},
             },
             Err(e) => {
-                account_parsing_error(e, account_info);
+                warn!(error = %e, pubkey = %account_info.pubkey, "Error while parsing account: {:?} {:?}", e, account_info)
             },
         }
 
@@ -508,7 +510,3 @@ fn map_account_info_fb_bytes(
 
     Ok(builder.finished_data().to_owned())
 }
-
-fn account_parsing_error(err: impl Debug, account_info: &plerkle::AccountInfo) {
-    warn!("Error while parsing account: {:?} {}", err, account_info.pubkey);
-}
diff --git a/nft_ingester/src/redis_receiver.rs b/nft_ingester/src/redis_receiver.rs
index 6f85739b..63096e1f 100644
--- a/nft_ingester/src/redis_receiver.rs
+++ b/nft_ingester/src/redis_receiver.rs
@@ -6,13 +6,14 @@ use interface::{
     error::UsecaseError, signature_persistence::UnprocessedTransactionsGetter,
     unprocessed_data_getter::UnprocessedAccountsGetter,
 };
+use metrics_utils::RedisReceiverMetricsConfig;
 use num_traits::Zero;
 use plerkle_messenger::{
     redis_messenger::RedisMessenger, ConsumptionType, Messenger, MessengerConfig, ACCOUNT_STREAM,
     TRANSACTION_STREAM,
 };
 use tokio::sync::{mpsc::UnboundedSender, Mutex};
-use tracing::{info, log::error};
+use tracing::{error, info};
 
 use crate::{error::IngesterError, message_parser::MessageParser};
 
@@ -21,6 +22,7 @@ pub struct RedisReceiver {
     message_parser: Arc<MessageParser>,
     messanger: Mutex<RedisMessenger>,
     ack_channel: UnboundedSender<(&'static str, String)>,
+    metrics: Arc<RedisReceiverMetricsConfig>,
 }
 
 impl RedisReceiver {
@@ -28,11 +30,12 @@ impl RedisReceiver {
         config: MessengerConfig,
         consumption_type: ConsumptionType,
         ack_channel: UnboundedSender<(&'static str, String)>,
+        metrics: Arc<RedisReceiverMetricsConfig>,
     ) -> Result<Self, IngesterError> {
         info!("Initializing RedisReceiver...");
         let message_parser = Arc::new(MessageParser::new());
         let messanger = Mutex::new(RedisMessenger::new(config).await?);
-        Ok(Self { messanger, consumption_type, message_parser, ack_channel })
+        Ok(Self { messanger, consumption_type, message_parser, ack_channel, metrics })
     }
 }
 
@@ -46,19 +49,23 @@ impl UnprocessedTransactionsGetter for RedisReceiver {
             .recv(TRANSACTION_STREAM, self.consumption_type.clone())
             .await
             .map_err(|e| UsecaseError::Messenger(e.to_string()))?;
+        self.metrics.inc_transactions_received_by(recv_data.len() as u64);
         let mut result = Vec::new();
         for item in recv_data {
             if let Some(tx) = self.message_parser.parse_transaction(item.data, false) {
                 result.push(BufferedTxWithID { tx, id: item.id })
+            } else {
+                self.metrics.inc_transaction_parse_errors_by(1);
             }
         }
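+        // invariant kept by the counters above: received == parsed + parse errors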
+        self.metrics.inc_transactions_parsed_by(result.len() as u64);
         Ok(result)
     }
 
     fn ack(&self, id: String) {
         let send = self.ack_channel.send((TRANSACTION_STREAM, id));
         if let Err(err) = send {
-            error!("Account stream ack error: {}", err);
+            error!(error = %err, "Account stream ack error: {:?}", err);
         }
     }
 }
@@ -77,6 +84,7 @@ impl UnprocessedAccountsGetter for RedisReceiver {
             .await
             .map_err(|e| UsecaseError::Messenger(e.to_string()))?;
 
+        self.metrics.inc_accounts_received_by(recv_data.len() as u64);
         let mut result = Vec::new();
         let mut unknown_account_types_ids = Vec::new();
         for item in recv_data {
@@ -101,10 +109,12 @@ impl UnprocessedAccountsGetter for RedisReceiver {
                     }
                 },
                 Err(err) => {
-                    error!("Parsing account: {}", err)
+                    error!(error = %err, "Error parsing account: {:?}", err);
+                    self.metrics.inc_account_parse_errors_by(1);
                 },
             }
         }
+        self.metrics.inc_accounts_parsed_by(result.len() as u64);
         UnprocessedAccountsGetter::ack(self, unknown_account_types_ids);
 
         Ok(result)
@@ -117,7 +127,7 @@ impl UnprocessedAccountsGetter for RedisReceiver {
         }
         let send = self.ack_channel.send((ACCOUNT_STREAM, id));
         if let Err(err) = send {
-            error!("Account stream ack error: {}", err);
+            error!(error = %err, "Account stream ack error: {:?}", err);
         }
     }
 }
From 8d06fdc9f6d4e974c9e94be37ea7bf7f66d2c8ee Mon Sep 17 00:00:00 2001
From: Kyrylo Stepanov
Date: Fri, 24 Jan 2025 17:08:55 +0200
Subject: [PATCH 17/33] Add tests for token 22 with mpl metadata and token 22
 extensions (#383)

---
 ...8tNo4heHxUb1hRr3ZTQfL9B3nXWyaE2cXGdGPadt3E | Bin 0 -> 752 bytes
 ...2WiAY3gxkyeL3CqpQ6u7Nhhph7DiX3uzowcwT1Fwgj | Bin 0 -> 320 bytes
 ...y4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn | Bin 0 -> 312 bytes
 ...y4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn | 1 +
 integration_tests/src/regular_nft_tests.rs    | 30 +++++++
 ..._get_asset_nft_token_22_with_metadata.snap | 74 ++++++++++++++++++
 6 files changed, 105 insertions(+)
 create mode 100644 integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/8Q8tNo4heHxUb1hRr3ZTQfL9B3nXWyaE2cXGdGPadt3E
 create mode 100644 integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/BS2WiAY3gxkyeL3CqpQ6u7Nhhph7DiX3uzowcwT1Fwgj
 create mode 100644 integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn
 create mode 100644 integration_tests/src/data/largest_token_account_ids/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn
 create mode 100644 integration_tests/src/snapshots/integration_tests__regular_nft_tests__get_asset_nft_token_22_with_metadata.snap

diff --git a/integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/8Q8tNo4heHxUb1hRr3ZTQfL9B3nXWyaE2cXGdGPadt3E b/integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/8Q8tNo4heHxUb1hRr3ZTQfL9B3nXWyaE2cXGdGPadt3E
new file mode 100644
index 0000000000000000000000000000000000000000..5dd771b67913fcb4839c88e8bc9d86df0a81e222
GIT binary patch
literal 752
zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQ%zZDKx=r!RgOg7RUnu=lj%78vqj8bt
zk8R1PSDg{ox_7w?Qa3)nSmSCjH>f_Fe%kpb
zg`(?s*>3CmWB#JuD~IRdg)avKSu?bAnZzYFwr}^+u(;Z-05sSyF*j8qttdZN!8f%e
wF(t7i5o#Nh;zHq`0P;$UG9emrv1&k3KbSdcz#wE~{0|G=!R*w5a2q270K79^(EtDd

literal 0
HcmV?d00001

diff --git a/integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/BS2WiAY3gxkyeL3CqpQ6u7Nhhph7DiX3uzowcwT1Fwgj b/integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/BS2WiAY3gxkyeL3CqpQ6u7Nhhph7DiX3uzowcwT1Fwgj
new file mode 100644
index 
0000000000000000000000000000000000000000..4902a706e96154b3907fae323b5c52a3128a0002
GIT binary patch
literal 320
zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQob^w#P{KIss?lMI7plxhCFHV$PnaJo
zOG(?G*S+xTHx9PD-yXgz?Y}4C6uak1&e>brr2hCe@8nhgxA&*UjfLHRG=OF-5K;h=
z3=IFFfDz1NV0do%arzV}XB|*%{Vv;WeSgefw0q_7JiPGbU?6LTb}o~+#K!jRUK$ox
zyJh~=i>nqkm1aKXO-qP=y_{{k=+=#|9=z={X+5|@t56hXF4TCu6e(&^3}Rvja(Tf*
E0J+UjWdHyG

literal 0
HcmV?d00001

diff --git a/integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn b/integration_tests/src/data/accounts/get_asset_nft_token_22_with_metadata/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn
new file mode 100644
index 0000000000000000000000000000000000000000..1ac503db3aed7901e60290f203e5f6adfd064c7f
GIT binary patch
literal 312
zcmY#jfB*@G3brr2hCe@8nhgxA&*UjfLHRKxRHzDi0(X
z82&>6Bbdj)P;dEh`V=5%6_5`S3wc(0xoxYd(AUVj*NZ;y{_XYUW0urs)j;XD@})n@
VPi}(gV_+mkHPjMnQjDBn(*UteSJnUk

literal 0
HcmV?d00001

diff --git a/integration_tests/src/data/largest_token_account_ids/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn b/integration_tests/src/data/largest_token_account_ids/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn
new file mode 100644
index 00000000..06d2fa5c
--- /dev/null
+++ b/integration_tests/src/data/largest_token_account_ids/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn/Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn
@@ -0,0 +1 @@
+šþq3ZÕ2Ãè%ÅkSÈ7Ævdf¿n‹¡õö
\ No newline at end of file
diff --git a/integration_tests/src/regular_nft_tests.rs b/integration_tests/src/regular_nft_tests.rs
index 0eca3c00..6bcd6623 100644
--- a/integration_tests/src/regular_nft_tests.rs
+++ b/integration_tests/src/regular_nft_tests.rs
@@ -3,6 +3,7 @@ use std::sync::Arc;
 use entities::api_req_params::{GetAsset, GetAssetBatch, GetAssetsByGroup, SearchAssets};
 use function_name::named;
 use itertools::Itertools;
+use rocks_db::storage_traits::AssetIndexReader;
 use serial_test::serial;
 use tokio::{sync::Mutex, task::JoinSet};
 
@@ -192,3 +193,32 @@ async fn test_regular_nft_collection() {
     let response = setup.das_api.get_asset(request, mutexed_tasks.clone()).await.unwrap();
     insta::assert_json_snapshot!(name.clone(), response);
 }
+
+#[tokio::test]
+#[serial]
+#[named]
+async fn get_asset_nft_token_22_with_metadata() {
+    let name = trim_test_name(function_name!());
+    let setup = TestSetup::new_with_options(
+        name.clone(),
+        TestSetupOptions { network: Some(Network::Devnet), clear_db: true },
+    )
+    .await;
+
+    let seeds: Vec<SeedEvent> = seed_nfts(["Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn"]);
+
+    index_seed_events(&setup, seeds.iter().collect_vec()).await;
+
+    let request = r#"
+    {
+        "id": "Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn"
+    }
+    "#;
+
+    let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new()));
+
+    let request: GetAsset = serde_json::from_str(request).unwrap();
+    let response = setup.das_api.get_asset(request, mutexed_tasks.clone()).await.unwrap();
+
+    insta::assert_json_snapshot!(name, response);
+}
diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__get_asset_nft_token_22_with_metadata.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__get_asset_nft_token_22_with_metadata.snap
new file mode 100644
index 00000000..b4a07c43
--- /dev/null
+++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__get_asset_nft_token_22_with_metadata.snap
@@ -0,0 +1,74 @@
+---
+source: 
integration_tests/src/regular_nft_tests.rs
+assertion_line: 226
+expression: response
+snapshot_kind: text
+---
+{
+  "interface": "V1_NFT",
+  "id": "Cpy4TfoLi1qtcx1grKx373NVksQ2xA3hMyNQvT2HFfQn",
+  "content": {
+    "$schema": "https://schema.metaplex.com/nft1.0.json",
+    "json_uri": "uri from metadata",
+    "files": [],
+    "metadata": {
+      "name": "Name from Metadata",
+      "symbol": "",
+      "token_standard": "NonFungible"
+    },
+    "links": {}
+  },
+  "authorities": [
+    {
+      "address": "2x9j2onZBPfKCxkYpL8zyfNiH3987RWHG3rzW12yceDJ",
+      "scopes": [
+        "full"
+      ]
+    }
+  ],
+  "compression": {
+    "eligible": false,
+    "compressed": false,
+    "data_hash": "",
+    "creator_hash": "",
+    "asset_hash": "",
+    "tree": "",
+    "seq": 0,
+    "leaf_id": 0
+  },
+  "grouping": [],
+  "royalty": {
+    "royalty_model": "creators",
+    "target": null,
+    "percent": 0.055,
+    "basis_points": 550,
+    "primary_sale_happened": false,
+    "locked": false
+  },
+  "creators": [],
+  "ownership": {
+    "frozen": false,
+    "delegated": false,
+    "delegate": null,
+    "ownership_model": "single",
+    "owner": "2x9j2onZBPfKCxkYpL8zyfNiH3987RWHG3rzW12yceDJ"
+  },
+  "supply": {
+    "print_max_supply": 0,
+    "print_current_supply": 0,
+    "edition_nonce": 255
+  },
+  "mutable": true,
+  "burnt": false,
+  "lamports": 15115600,
+  "executable": false,
+  "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s",
+  "rent_epoch": 18446744073709551615,
+  "token_info": {
+    "supply": 1,
+    "decimals": 0,
+    "token_program": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb",
+    "mint_authority": "6iR4spxZ2ZYTNWQHEe1UFxXn8rLQbYmnmAPYizKFRsY5",
+    "freeze_authority": "6iR4spxZ2ZYTNWQHEe1UFxXn8rLQbYmnmAPYizKFRsY5"
+  }
+}
From 11ad39b4a52fee389edd5f44afa4a26fd07f7222 Mon Sep 17 00:00:00 2001
From: Vadim <31490938+n00m4d@users.noreply.github.com>
Date: Fri, 24 Jan 2025 16:33:58 +0100
Subject: [PATCH 18/33] MTG 995 Add integration tests with full dbs sync
 scenario (#378)

* tests: add integration tests with full dbs sync scenario

* feat: change volume mount in CI

* fix: CI

* fix: CI

* fix: CI

* fix: CI

* fix: add permissions to created dir

* fix: move steps

* fix: CI

* fix: CI

* fix: CI

* fix: return back launching all the tests

* fix: mount for local Postgres

---
 .github/workflows/rust.yml                    |   6 +-
 .gitignore                                    |   2 +
 integration_tests/README.md                   |   4 +-
 integration_tests/run_postgres.sh             |  34 +++
 integration_tests/src/account_update_tests.rs |   2 +-
 integration_tests/src/common.rs               |  54 ++++-
 ...Fhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb | Bin 0 -> 240 bytes
 ...bRNh9Q9pcksZVnmQemoh7is2NqsRNTx4jmpv75knC6 | Bin 0 -> 224 bytes
 ...tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj | Bin 0 -> 256 bytes
 ...eHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd | Bin 0 -> 312 bytes
 ...FGrBUK1Ctgr8RBftsWH952hS69hzesBpnyThWC6MjR | Bin 0 -> 824 
bytes ...sxPaf6QFNBaN3LiVQAke99WaFMhT8JC2bWityF7mwZ | Bin 0 -> 424 bytes ...3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr | Bin 0 -> 376 bytes integration_tests/src/lib.rs | 1 + ..._and_regular_nfts_get_assets_by_owner.snap | 147 ++++++++++++ ...ull_sync_core_get_assets_by_authority.snap | 141 ++++++++++++ ...s__full_sync_core_get_assets_by_group.snap | 211 ++++++++++++++++++ ...s__full_sync_core_get_assets_by_owner.snap | 79 +++++++ integration_tests/src/synchronizer_tests.rs | 209 +++++++++++++++++ 31 files changed, 875 insertions(+), 15 deletions(-) create mode 100755 integration_tests/run_postgres.sh create mode 100644 integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb create mode 100644 integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/8qbRNh9Q9pcksZVnmQemoh7is2NqsRNTx4jmpv75knC6 create mode 100644 integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj create mode 100644 integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/CoeHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd create mode 100644 integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/DHFGrBUK1Ctgr8RBftsWH952hS69hzesBpnyThWC6MjR create mode 100644 integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/HEsxPaf6QFNBaN3LiVQAke99WaFMhT8JC2bWityF7mwZ create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_authority/4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_authority/9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_group/EgzsppfYJmUet4ve8MnuHMyvSnj6R7LRmwsGEH5TuGhB create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_group/J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264 create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_group/JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7 create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_group/kTMCCKLTaZsnSReer12HsciwScUwhHyZyd9D9BwQF8k create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/8qbRNh9Q9pcksZVnmQemoh7is2NqsRNTx4jmpv75knC6 create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/CoeHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/DHFGrBUK1Ctgr8RBftsWH952hS69hzesBpnyThWC6MjR create mode 100644 integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/HEsxPaf6QFNBaN3LiVQAke99WaFMhT8JC2bWityF7mwZ create mode 100644 integration_tests/src/data/accounts/sync/x3hJtpU4AUsGejNvxzX9TKjcyNB1eYtDdDPWdeF6opr create mode 100644 integration_tests/src/snapshots/integration_tests__synchronizer_tests__full_sync_core_and_regular_nfts_get_assets_by_owner.snap create mode 100644 integration_tests/src/snapshots/integration_tests__synchronizer_tests__full_sync_core_get_assets_by_authority.snap create mode 100644 
integration_tests/src/snapshots/integration_tests__synchronizer_tests__full_sync_core_get_assets_by_group.snap
 create mode 100644 integration_tests/src/snapshots/integration_tests__synchronizer_tests__full_sync_core_get_assets_by_owner.snap
 create mode 100644 integration_tests/src/synchronizer_tests.rs

diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index c447f94e..5e6bebb7 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -78,6 +78,7 @@ jobs:
           --health-interval 10s
           --health-timeout 5s
           --health-retries 5
+          --mount type=bind,source=${{ github.workspace }},target=/aura,readonly
 
     steps:
       - name: Check out
@@ -127,9 +128,12 @@ jobs:
             target/
           key: ${{ steps.cache-cargo.outputs.cache-primary-key }}
 
+      - name: Prepare directories
+        run: mkdir -p integration_tests/rocks_dump
+
       - name: Run tests
         env:
           DATABASE_TEST_URL: "postgres://postgres:postgres@127.0.0.1:5432/postgres"
           DEVNET_RPC_URL: ${{ secrets.SOLANA_DEVNET_RPC_URL }}
           MAINNET_RPC_URL: ${{ secrets.SOLANA_MAINNET_RPC_URL }}
-        run: cargo test --features integration_tests -- --nocapture
+        run: cargo test --features integration_tests -- --nocapture
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 8395953f..e4e19b52 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,3 +36,5 @@ my_rocksdb/
 _rocksdb_backup/
 _rocksdb_backup_archives/
 /.project
+# Used by integration tests
+rocks_dump
diff --git a/integration_tests/README.md b/integration_tests/README.md
index 1b533abe..e7ec1a4a 100644
--- a/integration_tests/README.md
+++ b/integration_tests/README.md
@@ -7,7 +7,9 @@ This Cargo package helps us run multi-package tests in our workspace. This setup
 
 ## Setup
 
-First setup a local Postgres database and export the postgres database URL as follows:
+First, set up a local PostgreSQL database. This can be done using the `run_postgres.sh` script. It is highly recommended to use this script as it creates and mounts the required directory, `rocks_dump`. This directory is essential for tests involving full database synchronization.
+
+Then export the postgres database URL as follows:
 
 ```export DATABASE_TEST_URL=postgres://postgres@localhost/```
 
 Also gain access to mainnet RPCs and devnet RPCs and export the URLs as follows. Currently,
diff --git a/integration_tests/run_postgres.sh b/integration_tests/run_postgres.sh
new file mode 100755
index 00000000..95515a52
--- /dev/null
+++ b/integration_tests/run_postgres.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+CONTAINER_NAME="test_db"
+IMAGE_NAME="postgres:14"
+DB_USER="solana"
+DB_PASSWORD="solana"
+DB_NAME="solana"
+DB_PATH="./db-data"
+ROCKS_DUMP_PATH="./rocks_dump"
+HOST_PORT="5432"
+CONTAINER_PORT="5432"
+
+mkdir -p "$DB_PATH"
+mkdir -p "$ROCKS_DUMP_PATH"
+
+docker run -d \
+  --name $CONTAINER_NAME \
+  -e POSTGRES_USER=$DB_USER \
+  -e POSTGRES_PASSWORD=$DB_PASSWORD \
+  -e POSTGRES_DB=$DB_NAME \
+  -v "$DB_PATH:/var/lib/postgresql/data:rw" \
+  -v "$ROCKS_DUMP_PATH:/aura/integration_tests/rocks_dump:ro" \
+  -p $HOST_PORT:$CONTAINER_PORT \
+  --shm-size=1g \
+  $IMAGE_NAME \
+  postgres -c log_statement=none \
+    -c log_destination=stderr
+
+if [ $? -eq 0 ]; then
+  echo "PostgreSQL container '$CONTAINER_NAME' is running."
+else
+  echo "Failed to start the PostgreSQL container."
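+  # exit non-zero so callers (e.g. CI jobs) notice the database never started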
+  exit 1
+fi
diff --git a/integration_tests/src/account_update_tests.rs b/integration_tests/src/account_update_tests.rs
index 9568f99f..904efb6b 100644
--- a/integration_tests/src/account_update_tests.rs
+++ b/integration_tests/src/account_update_tests.rs
@@ -106,7 +106,7 @@ async fn index_account_update(setup: &TestSetup, pubkey: Pubkey, update: Account
     let is_startup = false;
 
     let fbb = serialize_account(fbb, &account_info, slot, is_startup);
-    index_account_bytes(setup, fbb.finished_data().to_vec()).await;
+    index_and_sync_account_bytes(setup, fbb.finished_data().to_vec()).await;
 }
 
 #[tokio::test]
diff --git a/integration_tests/src/common.rs b/integration_tests/src/common.rs
index 8c55b9c6..442736d5 100644
--- a/integration_tests/src/common.rs
+++ b/integration_tests/src/common.rs
@@ -65,8 +65,10 @@ const API_MAX_PAGE_LIMIT: usize = 100;
 
 const DUMP_SYNCHRONIZER_BATCH_SIZE: usize = 1000;
 const SYNCHRONIZER_PARALLEL_TASKS: usize = 1;
+const SYNCHRONIZER_DUMP_PATH: &str = "rocks_dump";
 
 const POSTGRE_MIGRATIONS_PATH: &str = "../migrations";
+const POSTGRE_BASE_DUMP_PATH: &str = "/aura/integration_tests/";
 
 pub struct TestSetup {
     pub name: String,
@@ -105,7 +107,7 @@ impl TestSetup {
             red_metrics.clone(),
             MIN_PG_CONNECTIONS,
             POSTGRE_MIGRATIONS_PATH,
-            Some(PathBuf::from_str("./dump").unwrap()),
+            Some(PathBuf::from_str(POSTGRE_BASE_DUMP_PATH).unwrap()),
             None,
         )
         .await
@@ -185,7 +187,7 @@ impl TestSetup {
             storage.clone(),
             index_storage.clone(),
             DUMP_SYNCHRONIZER_BATCH_SIZE,
-            "./dump".to_string(),
+            SYNCHRONIZER_DUMP_PATH.to_string(),
             metrics_state.synchronizer_metrics.clone(),
             SYNCHRONIZER_PARALLEL_TASKS,
         );
@@ -395,7 +397,16 @@ pub async fn get_token_largest_account(client: &RpcClient, mint: Pubkey) -> anyh
     }
 }
 
-pub async fn index_account_bytes(setup: &TestSetup, account_bytes: Vec<u8>) {
+pub async fn index_and_sync_account_bytes(setup: &TestSetup, account_bytes: Vec<u8>) {
+    process_and_save_accounts_to_rocks(setup, account_bytes).await;
+
+    let (_shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1);
+    // copy data to Postgres
+    setup.synchronizer.synchronize_nft_asset_indexes(&shutdown_rx, 1000).await.unwrap();
+    setup.synchronizer.synchronize_fungible_asset_indexes(&shutdown_rx, 1000).await.unwrap();
+}
+
+async fn process_and_save_accounts_to_rocks(setup: &TestSetup, account_bytes: Vec<u8>) {
     let parsed_acc = setup.message_parser.parse_account(account_bytes, false).unwrap();
     let ready_to_process = parsed_acc
         .into_iter()
@@ -425,11 +436,6 @@ async fn process_and_save_accounts_to_rocks(setup: &TestSetup, account_bytes: Ve
         .await;
 
     let _ = batch_storage.flush();
-
-    let (_shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1);
-    setup.synchronizer.synchronize_nft_asset_indexes(&shutdown_rx, 1000).await.unwrap();
-
-    setup.synchronizer.synchronize_fungible_asset_indexes(&shutdown_rx, 1000).await.unwrap();
 }
 
 pub async fn cached_fetch_account(
@@ -539,11 +545,12 @@ pub enum Order {
     AllPermutations,
 }
 
+/// Data will be indexed, saved to RocksDB and copied to Postgres.
 pub async fn index_seed_events(setup: &TestSetup, events: Vec<&SeedEvent>) {
     for event in events {
         match event {
             SeedEvent::Account(account) => {
-                index_account_with_ordered_slot(setup, *account).await;
+                index_and_sync_account_with_ordered_slot(setup, *account).await;
             },
             SeedEvent::Nft(mint) => {
                 index_nft(setup, *mint).await;
@@ -558,6 +565,23 @@ pub async fn index_seed_events(setup: &TestSetup, events: Vec<&SeedEvent>) {
     }
 }
 
+/// Data will be indexed and saved to one DB - RocksDB.
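+/// This is the path the full-sync tests rely on: seed RocksDB only, then let the
+/// Synchronizer dump and bulk-load everything into Postgres in one pass.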
+///
+/// For syncing with Postgres, an additional method should be called.
+pub async fn single_db_index_seed_events(setup: &TestSetup, events: Vec<&SeedEvent>) {
+    for event in events {
+        match event {
+            SeedEvent::Account(account) => {
+                index_account_with_ordered_slot(setup, *account).await;
+            },
+            _ => {
+                // TODO: add more seed events processing if needed
+                panic!("Current SeedEvent is not supported for single DB processing")
+            },
+        }
+    }
+}
+
 #[allow(unused)]
 pub fn seed_account(str: &str) -> SeedEvent {
     SeedEvent::Account(Pubkey::from_str(str).unwrap())
@@ -616,7 +640,7 @@ pub async fn index_account(setup: &TestSetup, account: Pubkey) {
     // they are "stale".
     let slot = Some(DEFAULT_SLOT);
     let account_bytes = cached_fetch_account(setup, account, slot).await;
-    index_account_bytes(setup, account_bytes).await;
+    index_and_sync_account_bytes(setup, account_bytes).await;
 }
 
 #[derive(Clone, Copy)]
@@ -632,10 +656,16 @@ pub async fn get_nft_accounts(setup: &TestSetup, mint: Pubkey) -> NftAccounts {
     NftAccounts { mint, metadata: metadata_account, token: token_account }
 }
 
+async fn index_and_sync_account_with_ordered_slot(setup: &TestSetup, account: Pubkey) {
+    let slot = None;
+    let account_bytes = cached_fetch_account(setup, account, slot).await;
+    index_and_sync_account_bytes(setup, account_bytes).await;
+}
+
 async fn index_account_with_ordered_slot(setup: &TestSetup, account: Pubkey) {
     let slot = None;
     let account_bytes = cached_fetch_account(setup, account, slot).await;
-    index_account_bytes(setup, account_bytes).await;
+    process_and_save_accounts_to_rocks(setup, account_bytes).await;
 }
 
 async fn index_token_mint(setup: &TestSetup, mint: Pubkey) {
@@ -650,7 +680,7 @@ async fn index_token_mint(setup: &TestSetup, mint: Pubkey) {
     let metadata_account = Metadata::find_pda(&mint).0;
     match cached_fetch_account_with_error_handling(setup, metadata_account, slot).await {
         Ok(account_bytes) => {
-            index_account_bytes(setup, account_bytes).await;
+            index_and_sync_account_bytes(setup, account_bytes).await;
         },
         Err(_) => {
             // If we can't find the metadata account, then we assume that the mint is not an NFT.
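[Editorial note] The helpers above define two seeding paths: `index_seed_events` mirrors every account into Postgres right away, while `single_db_index_seed_events` leaves Postgres empty so a test can drive the full dump-and-load itself. A minimal sketch of that flow, using only the helpers introduced in this patch (the seed pubkey is a placeholder, not a real account):

    // Sketch only: seed RocksDB, then perform a full dump/load into Postgres.
    async fn full_sync_smoke(setup: &TestSetup) {
        let seeds: Vec<SeedEvent> = seed_accounts(["<account pubkey>"]);
        // 1. Accounts land in RocksDB; Postgres stays empty.
        single_db_index_seed_events(setup, seeds.iter().collect()).await;
        // 2. Dump RocksDB into `rocks_dump` and bulk-load the dump into Postgres.
        let (_tx, rx) = tokio::sync::broadcast::channel::<()>(1);
        setup.synchronizer.full_syncronize(&rx, AssetType::NonFungible).await.unwrap();
        // 3. DAS API reads are now served from the synchronized indexes.
    }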
diff --git a/integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb b/integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb new file mode 100644 index 0000000000000000000000000000000000000000..5cc475562418dbd6e50401d2dfdb34ec2f6a9504 GIT binary patch literal 240 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQG_bs|;Z$ExPQ|-5Z08Sut=W3#=Io$p z5>p@NW*(pX>ucTmkktZvr!RF{Fz@^38J%}|E>Br<=H>lmXHVAc(^ZaM?x6uRW5Hx` zAj!b+9}52LF%<>zS+dsjPGJPG7#QM!n6-QI|1j1PuhK%tbKmCMF7-TfBCx=3+b8B4 zC2PHBj&oP>0~Lj&7MCbE=jY_4CYNO9=g9$uGD=DcimmkZQ!5g43vyERlJj%*li?~r I7CsNcfguRU22u!Dre_g9_F{q$yi$ZCPT)0a9enD>42jLy3}m!~W_^YZ?(vnT8J=_*Gr_s{^E!JuUT zA{qWe!QVZmq9Fdu>@~ep7(pxshCCoQ|brkvv{rc9T#E=0U#iGOhE`%Dkn zv)U`z1SFXZEH7+0)fbdg@oo*<`NLmpw%)lpJ7}83)W^A*$0z^#TE_)6B_y@DM8UDR iIJHC)$jc}xDJZtm*H5iT%q_@C)l1IL)lURT0_6Y?!en0n literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/CoeHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd b/integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/CoeHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd new file mode 100644 index 0000000000000000000000000000000000000000..6437eb6fb94f5454a167771cc4614ecf547260d9 GIT binary patch literal 312 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQT%VkKDbT3vjNsa9mYy#joVsX!H7NP| z!6yp#mJD{X&kfk_etUR5b>ZZjryshVdtJFkW=rea_-ONqRy|)^>;5yW)Bu|CK~^3} zGBEsy0*1Y&q9FeD{58E(7(pxshNVDUQZn22P}q5j^miPykzR&cKJ*&NfO+XTA6xcjM1Sx9K%>n?5 Cno!jM literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/DHFGrBUK1Ctgr8RBftsWH952hS69hzesBpnyThWC6MjR b/integration_tests/src/data/accounts/full_sync_core_and_regular_nfts_get_assets_by_owner/DHFGrBUK1Ctgr8RBftsWH952hS69hzesBpnyThWC6MjR new file mode 100644 index 0000000000000000000000000000000000000000..08a6353a0f7dcc763fab601585774b2c93401ee9 GIT binary patch literal 824 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQ+*Z2g2t#PF@w(|tL)TwCvm(PgsQ&G) z194ZR_?KM&G@XUJAai#4tmbA#%$Sq`6#*pX8#wczho&utg?F#phW(*T*dB@{?9 zF#LxC#=WMZApYLsHN8_9K`aJ_`L#{@LdLcoW)YSC3P6KGGD_1y2I2x- zK#33^nCuB4Bcr6Gpx8=ZKe4DhHL)yJFE6!3zpBLCv??bxF{dmnE!;fUtem)7@ur; zyZTM3F>jeM`yB3q)QyiX*0@^C4XV#(Iea!^M^3TSg1=imw_V)B{6o_XWafh|Z6L|O m@E;0T_L_=<_}@#`^iE-9V2}c0Hjn@aFuq`hFh)sO2mk<31uz@{ literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/full_sync_core_get_assets_by_authority/4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz b/integration_tests/src/data/accounts/full_sync_core_get_assets_by_authority/4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz new file mode 100644 index 0000000000000000000000000000000000000000..82467c8aadc2e5dd3fa9d6a6190758f88ae3163e GIT binary patch literal 256 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQGzjFGw%~Pli^BPwFOl!&vazS?yk4{E zkyX3>YDuf93Gdg3tQOcieW}xedEYnB=)B8wdCHPAFYhlqd$Mkyu5$Ep4-KFh3|a;t zlHorTgfBJ~1@X_^Z0()G2x2iXU(n7~`-{#vc^*nPTu)uHIC*~R@ zYrSWVb64>L6@{c0mnb;r=j5a&mt^MW$pM8jN=gcft@QO%D-v@Ha#Hn@^K%XlJSuL=4`ckI_^S*DM(Rr8W@{}cKUfy4J_GH~YUFGQI9vVP17_RnmDX0@t}a3DhsPx=z2kNtC67!{;sTlyl3HA%;8 literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/full_sync_core_get_assets_by_group/J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264 b/integration_tests/src/data/accounts/full_sync_core_get_assets_by_group/J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264 new file mode 100644 index 0000000000000000000000000000000000000000..281ef1769736cc8799a00a25944f6a4ad989bd0f GIT 
binary patch literal 256 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQ{L9%jcM8wT2MJc6M0;#2w^&aNdUKRZ zckkxvEvvjW-o9KPvRYv8^rcP<=6&Bhqw_A$E$7;qZ#BhDE3K`*U0t$NV%@_Q4v$M#ddKZPN*LPXtK<=P#Z4=%t-f7dvQ%Q-!xj#Y zOICWv?LJB#nV_^jWVOKF=}Vm!%=^B1M(16g%Ttz|d3k@?*^_nqbd{r*duRa7xKJer zB!Ol@f#DKUQ4l{{sl9g!BZ$SoFdv9nyC?q-V=eJ2Ep$BhZNBYN&od_i3;ecyVy;oL z)_dkScNITSQAlcWiGp)}PEKlaNoIZ?$mEQYl7eC@ef`vm#N2|MRK4WWIY3TB`R zFwOxoK!62Ifb3;qu4UzxxsqJ{Vrk@&&4FbsjyvuNHnm6amUm6BRnmDX0@t}a3DhsPx=z2kNtC67!{;sTlyl3HA%;8p@NW*(pX>ucTmkktZvr!RF{Fz@^38J%}|E>Br<=H>lmXHVAc(^ZaM?x6uRW5Hx` zAj!b+9|~SBGZh63n0EJ0VFa@n;(!!u_vHU!tR-Hhg^uUG&9`0ZdFDi5f#0@I%r#2Z zde0o^uHpwO3P~+4QE<-B$w^Hv$;{7_0}5r7loS+O>FcLfB<2?6r0ON-=jtcJRe&sj GSONe{kXHEs literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/8qbRNh9Q9pcksZVnmQemoh7is2NqsRNTx4jmpv75knC6 b/integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/8qbRNh9Q9pcksZVnmQemoh7is2NqsRNTx4jmpv75knC6 new file mode 100644 index 0000000000000000000000000000000000000000..dc41d41e3fa5d2b61cbd26cee96e59a704caef02 GIT binary patch literal 224 zcmY#jfB*@G3!Dre_g9_F{q$yi$ZCPT)0a9enD>42jLy3}m!~W_^YZ?(vnT8J=_*Gr_s{^E!JuUT zA{qWe!K-Daq9DGSS$FRgMi7gEArFWd)24bJ?I@yWlw)^Pz%2}vz3QE)6S iPA!oH@-j+F3W}}t^;0Vna|?1(^^)^*^%FsoKsf+y_+uRa literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/CoeHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd b/integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/CoeHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd new file mode 100644 index 0000000000000000000000000000000000000000..cdf1f58de991e14f684dc8c69292df80826314eb GIT binary patch literal 312 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQT%VkKDbT3vjNsa9mYy#joVsX!H7NP| z!6yp#mJD{X&kfk_etUR5b>ZZjryshVdtJFkW=rea_-ONqRy|)^>;5yW)Bu|CK~^3} zGBEsyf~i|gMM3-vE(?37FoIYN3`>Eyq-3`3p|qV-G<%CY9-q~0*>dc$`L#{@LdLcoW)YSC3P6KGGD_1y2I2x- zK#33^nCuB4Bcr6Gpx8=ZKe4DhHL)yJFE6!3zpBLCv??bxF{dmnE!;fUtb%7 literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/HEsxPaf6QFNBaN3LiVQAke99WaFMhT8JC2bWityF7mwZ b/integration_tests/src/data/accounts/full_sync_core_get_assets_by_owner/HEsxPaf6QFNBaN3LiVQAke99WaFMhT8JC2bWityF7mwZ new file mode 100644 index 0000000000000000000000000000000000000000..9bd88853c5e340c5035a69a8091efa30157de520 GIT binary patch literal 424 zcmY#jfB*@G3em)7@ur; zyZTM3F>jeM`yB3q)QyiX*0@^C4XV#(Iea!^M^3TSg1=imw_V)B{6o_XWafh|Z6L|O m@E;1MZZ#DJ@qJwv_D*4BV2}c0Hjn@aFuq`hFh)sO2mk=KyziT5bl&B;JY~t5m-m;QJz2L;S2=pQ2gpo^P+K6$ z!0;am(%%`0g7|ZV%Q~kpGBCUXvKhN4{|{p=@hUBJJojzB?NZM(CjtxnwtZr*QL@&1 z<~Vm1)3)B%*&72d-0ycea*^}!%CIS>iggBS|NgZxJGENjfS)55(3FtW;t~bN;^Nd2 zNgywyq@ = seed_accounts([ + "9CSyGBw1DCVZfx621nb7UBM9SpVDsX1m9MaN6APCf1Ci", + "4FcFVJVPRsYoMjt8ewDGV5nipoK63SNrJzjrBHyXvhcz", + ]); + + single_db_index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1); + + setup + .synchronizer + .full_syncronize(&shutdown_rx.resubscribe(), AssetType::NonFungible) + .await + .unwrap(); + + let request = r#" + { + "authorityAddress": "APrZTeVysBJqAznfLXS71NAzjr2fCVTSF1A66MeErzM7", + "sortBy": { + "sortBy": "updated", + "sortDirection": "asc" + }, + "page": 1, + "limit": 50 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetsByAuthority = serde_json::from_str(request).unwrap(); + let response = + setup.das_api.get_assets_by_authority(request, mutexed_tasks.clone()).await.unwrap(); + 
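+    // the returned page comes from the Postgres index that full_syncronize just populated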
insta::assert_json_snapshot!(name, response);
+}
+
+#[tokio::test]
+#[serial]
+#[named]
+async fn test_full_sync_core_get_assets_by_group() {
+    let name = trim_test_name(function_name!());
+    let setup = TestSetup::new_with_options(
+        name.clone(),
+        TestSetupOptions { network: Some(Network::Devnet), clear_db: true },
+    )
+    .await;
+
+    let seeds: Vec<SeedEvent> = seed_accounts([
+        "JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7",
+        "kTMCCKLTaZsnSReer12HsciwScUwhHyZyd9D9BwQF8k",
+        "EgzsppfYJmUet4ve8MnuHMyvSnj6R7LRmwsGEH5TuGhB",
+        "J2kazVRuZ33Po4PVyZGxiDYUMQ1eZiT5Xa13usRYo264",
+    ]);
+
+    single_db_index_seed_events(&setup, seeds.iter().collect_vec()).await;
+
+    let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1);
+
+    setup
+        .synchronizer
+        .full_syncronize(&shutdown_rx.resubscribe(), AssetType::NonFungible)
+        .await
+        .unwrap();
+
+    let request = r#"
+    {
+        "groupKey": "collection",
+        "groupValue": "JChzyyp1CnNz56tJLteQ5BsbngmWQ3JwcxLZrmuQA5b7",
+        "sortBy": {
+            "sortBy": "updated",
+            "sortDirection": "asc"
+        },
+        "page": 1,
+        "limit": 50
+    }
+    "#;
+
+    let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new()));
+
+    let request: GetAssetsByGroup = serde_json::from_str(request).unwrap();
+    let response = setup.das_api.get_assets_by_group(request, mutexed_tasks.clone()).await.unwrap();
+    insta::assert_json_snapshot!(name, response);
+}
+
+#[tokio::test]
+#[serial]
+#[named]
+async fn test_full_sync_core_get_assets_by_owner() {
+    let name = trim_test_name(function_name!());
+    let setup = TestSetup::new_with_options(
+        name.clone(),
+        TestSetupOptions { network: Some(Network::Devnet), clear_db: true },
+    )
+    .await;
+
+    let seeds: Vec<SeedEvent> = seed_accounts([
+        "4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb",
+        "9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj",
+    ]);
+
+    single_db_index_seed_events(&setup, seeds.iter().collect_vec()).await;
+
+    let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1);
+
+    setup
+        .synchronizer
+        .full_syncronize(&shutdown_rx.resubscribe(), AssetType::NonFungible)
+        .await
+        .unwrap();
+
+    let request = r#"
+    {
+        "ownerAddress": "7uScVQiT4vArB88dHrZoeVKWbtsRJmNp9r5Gce5VQpXS",
+        "sortBy": {
+            "sortBy": "updated",
+            "sortDirection": "asc"
+        },
+        "page": 1,
+        "limit": 50
+    }
+    "#;
+
+    let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new()));
+
+    let request: GetAssetsByOwner = serde_json::from_str(request).unwrap();
+    let response = setup.das_api.get_assets_by_owner(request, mutexed_tasks.clone()).await.unwrap();
+    insta::assert_json_snapshot!(name, response);
+}
+
+#[tokio::test]
+#[serial]
+#[named]
+async fn test_full_sync_core_and_regular_nfts_get_assets_by_owner() {
+    let name = trim_test_name(function_name!());
+    let setup = TestSetup::new_with_options(
+        name.clone(),
+        TestSetupOptions { network: Some(Network::Devnet), clear_db: true },
+    )
+    .await;
+
+    let seeds: Vec<SeedEvent> = seed_accounts([
+        "4FFhh184GNqh3LEK8UhMY7KBuCdNvvhU7C23ZKrKnofb",
+        "9tsHoBrkSqBW5uMxKZyvxL6m9CCaz1a7sGEg8SuckUj",
+        // below are accounts related to a regular NFT
+        "8qbRNh9Q9pcksZVnmQemoh7is2NqsRNTx4jmpv75knC6", // mint account
+        "CoeHPhsozRMmvJTg2uaNrAmQmjVvLk6PJvoEWDJavuBd", // token account
+        "DHFGrBUK1Ctgr8RBftsWH952hS69hzesBpnyThWC6MjR", // metadata account
+        "HEsxPaf6QFNBaN3LiVQAke99WaFMhT8JC2bWityF7mwZ", // master edition account
+    ]);
+
+    single_db_index_seed_events(&setup, seeds.iter().collect_vec()).await;
+
+    let (shutdown_tx, shutdown_rx) = broadcast::channel::<()>(1);
+
+    setup
+        .synchronizer
+        .full_syncronize(&shutdown_rx.resubscribe(), AssetType::NonFungible)
+        .await
+        .unwrap();
+
+    let 
request = r#" + { + "ownerAddress": "7uScVQiT4vArB88dHrZoeVKWbtsRJmNp9r5Gce5VQpXS", + "sortBy": { + "sortBy": "updated", + "sortDirection": "asc" + }, + "page": 1, + "limit": 50 + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAssetsByOwner = serde_json::from_str(request).unwrap(); + let response = setup.das_api.get_assets_by_owner(request, mutexed_tasks.clone()).await.unwrap(); + insta::assert_json_snapshot!(name, response); +} From f2a45d799e8b5458153fcd7308d60d965fe93935 Mon Sep 17 00:00:00 2001 From: Vadim <31490938+n00m4d@users.noreply.github.com> Date: Fri, 24 Jan 2025 16:45:44 +0100 Subject: [PATCH 19/33] feat: extend range of interfaces for supply object display (#382) --- ...date_tests__account_updates-metadata-updated.snap | 6 +++++- ..._update_tests__account_updates-token-updated.snap | 6 +++++- ...date_tests__account_updates-with-all-updates.snap | 6 +++++- ...tests__account_update_tests__account_updates.snap | 6 +++++- ...tests__general_scenario_tests__asset_parsing.snap | 6 +++++- ...tion_tests__regular_nft_tests__reg_get_asset.snap | 6 +++++- ...tests__reg_get_asset_batch-2-and-a-missing-1.snap | 12 ++++++++++-- ...ests__reg_get_asset_batch-only-2-different-2.snap | 12 ++++++++++-- ...egular_nft_tests__reg_get_asset_batch-only-2.snap | 12 ++++++++++-- ...s__regular_nft_tests__reg_get_asset_by_group.snap | 6 +++++- ..._tests__regular_nft_tests__reg_search_assets.snap | 6 +++++- ...s__regular_nft_tests__regular_nft_collection.snap | 6 +++++- nft_ingester/src/api/dapi/rpc_asset_convertors.rs | 6 +++++- 13 files changed, 80 insertions(+), 16 deletions(-) diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap index 6663274f..ec03270e 100644 --- a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap +++ b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-metadata-updated.snap @@ -64,7 +64,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, "mutable": false, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap index 3f93cdf8..d8b91a55 100644 --- a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap +++ b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-token-updated.snap @@ -64,7 +64,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap index 3ca111f6..880c714e 100644 --- 
a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap +++ b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates-with-all-updates.snap @@ -64,7 +64,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "1111111QLbz7JHiBTspS962RLKV8GndWFwiEaqKM" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, "mutable": false, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates.snap b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates.snap index be85df53..aac7d56f 100644 --- a/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates.snap +++ b/integration_tests/src/snapshots/integration_tests__account_update_tests__account_updates.snap @@ -64,7 +64,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap b/integration_tests/src/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap index 5cd4f3e8..20aa0fcd 100644 --- a/integration_tests/src/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap +++ b/integration_tests/src/snapshots/integration_tests__general_scenario_tests__asset_parsing.snap @@ -64,7 +64,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "BzbdvwEkQKeghTY53aZxTYjUienhdbkNVkgrLV6cErke" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap index 3bd76795..0fc211e2 100644 --- a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset.snap @@ -70,7 +70,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "A59E2tNJEqNN9TDnzgGnmLmnTsdRDoPocGx3n1w2dqZw" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 255 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap index 23e74e6a..8e135d1c 100644 --- a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-2-and-a-missing-1.snap @@ -71,7 +71,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 255 + }, "mutable": true, "burnt": false, "lamports": 5616720, @@ -153,7 +157,11 @@ snapshot_kind: text 
"ownership_model": "single", "owner": "3H3d3hfpZVVdVwuFAxDtDSFN2AdR7kwiDA3ynbnbkhc9" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 255 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap index df528f42..65550f98 100644 --- a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2-different-2.snap @@ -71,7 +71,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 255 + }, "mutable": true, "burnt": false, "lamports": 5616720, @@ -152,7 +156,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "3H3d3hfpZVVdVwuFAxDtDSFN2AdR7kwiDA3ynbnbkhc9" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 255 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap index dd57b22a..92bbd294 100644 --- a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_batch-only-2.snap @@ -71,7 +71,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "BaBQKh34KrqZzd4ifSHQYMf86HiBGASN6TWUi1ZwfyKv" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 252 + }, "mutable": true, "burnt": false, "lamports": 5616720, @@ -152,7 +156,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "9PacVenjPyQYiWBha89UYRM1nn6mf9bGY7vi32zY6DLn" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 255 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap index 53050318..66846213 100644 --- a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_get_asset_by_group.snap @@ -80,7 +80,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "9qUcfdADyrrTSetFjNjF9Ro7LKAqzJkzZV6WKLHfv5MU" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 254 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap index bfebe7d8..47c4a3a8 100644 --- a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap +++ 
b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__reg_search_assets.snap @@ -75,7 +75,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "6Cr66AabRYymhZgYQSfTCo6FVpH18wXrMZswAbcErpyX" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 255 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap index 42b83705..3d6143ed 100644 --- a/integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap +++ b/integration_tests/src/snapshots/integration_tests__regular_nft_tests__regular_nft_collection.snap @@ -59,7 +59,11 @@ snapshot_kind: text "ownership_model": "single", "owner": "2RtGg6fsFiiF1EQzHqbd66AhW7R5bWeQGpTbv2UMkCdW" }, - "supply": null, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 253 + }, "mutable": true, "burnt": false, "lamports": 5616720, diff --git a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs index 3750a4b6..ba75a46c 100644 --- a/nft_ingester/src/api/dapi/rpc_asset_convertors.rs +++ b/nft_ingester/src/api/dapi/rpc_asset_convertors.rs @@ -366,7 +366,11 @@ pub fn asset_to_rpc( _ => None, }; let supply = match interface { - Interface::V1NFT => { + Interface::V1NFT + | Interface::LegacyNft + | Interface::Nft + | Interface::ProgrammableNFT + | Interface::Custom => { if let Some(edition_info) = &full_asset.edition_data { Some(Supply { edition_nonce, From 4ee5691a5213c4f6731b466d20f754de2d062c5e Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Fri, 24 Jan 2025 17:28:13 +0000 Subject: [PATCH 20/33] [MTG-1258] an option to turn off the json re-download, enabled by default (#384) * an option to turn off the json re-download, enabled by default * timeouts separation * nit rename --- interface/src/json.rs | 4 +++ nft_ingester/src/api/dapi/asset.rs | 7 +++-- nft_ingester/src/bin/api/main.rs | 1 + nft_ingester/src/bin/ingester/main.rs | 1 + nft_ingester/src/bin/slot_checker/main.rs | 5 ++-- nft_ingester/src/config.rs | 16 +++++++++++- nft_ingester/src/json_worker.rs | 31 +++++++++++++++-------- nft_ingester/tests/api_tests.rs | 12 +++++---- 8 files changed, 56 insertions(+), 21 deletions(-) diff --git a/interface/src/json.rs b/interface/src/json.rs index 42fe0bfb..8676fea6 100644 --- a/interface/src/json.rs +++ b/interface/src/json.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use async_trait::async_trait; use mockall::automock; @@ -13,7 +15,9 @@ pub trait JsonDownloader { async fn download_file( &self, url: String, + timeout: Duration, ) -> Result; + fn skip_refresh(&self) -> bool; } #[automock] diff --git a/nft_ingester/src/api/dapi/asset.rs b/nft_ingester/src/api/dapi/asset.rs index 992f2f72..965be925 100644 --- a/nft_ingester/src/api/dapi/asset.rs +++ b/nft_ingester/src/api/dapi/asset.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, string::ToString, sync::Arc}; +use std::{collections::HashMap, string::ToString, sync::Arc, time::Duration}; use entities::{ api_req_params::{AssetSortDirection, Options}, @@ -33,6 +33,7 @@ use crate::api::dapi::rpc_asset_models::FullAsset; pub const COLLECTION_GROUP_KEY: &str = "collection"; pub const METADATA_CACHE_TTL: i64 = 86400; // 1 day +pub const CLIENT_TIMEOUT: Duration = Duration::from_secs(3); fn 
convert_rocks_asset_model( asset_pubkey: &Pubkey, @@ -230,6 +231,7 @@ pub async fn get_by_ids< let curr_time = chrono::Utc::now().timestamp(); if offchain_data.storage_mutability.is_mutable() && curr_time > offchain_data.last_read_at + METADATA_CACHE_TTL + && !json_downloader.skip_refresh() { download_needed = true; } @@ -267,7 +269,8 @@ pub async fn get_by_ids< let json_downloader = json_downloader.clone(); async move { - let response = json_downloader.download_file(url.clone()).await; + let response = + json_downloader.download_file(url.clone(), CLIENT_TIMEOUT).await; (url, response) } }) diff --git a/nft_ingester/src/bin/api/main.rs b/nft_ingester/src/bin/api/main.rs index 1ecffcab..f622ff42 100644 --- a/nft_ingester/src/bin/api/main.rs +++ b/nft_ingester/src/bin/api/main.rs @@ -102,6 +102,7 @@ pub async fn main() -> Result<(), IngesterError> { json_downloader_metrics.clone(), red_metrics.clone(), args.parallel_json_downloaders, + args.api_skip_inline_json_refresh.unwrap_or_default(), ) .await, )) diff --git a/nft_ingester/src/bin/ingester/main.rs b/nft_ingester/src/bin/ingester/main.rs index 6cee5f9b..e23be7ca 100644 --- a/nft_ingester/src/bin/ingester/main.rs +++ b/nft_ingester/src/bin/ingester/main.rs @@ -281,6 +281,7 @@ pub async fn main() -> Result<(), IngesterError> { metrics_state.json_downloader_metrics.clone(), metrics_state.red_metrics.clone(), args.parallel_json_downloaders, + args.api_skip_inline_json_refresh.unwrap_or_default(), ) .await, ); diff --git a/nft_ingester/src/bin/slot_checker/main.rs b/nft_ingester/src/bin/slot_checker/main.rs index dda51445..0630329e 100644 --- a/nft_ingester/src/bin/slot_checker/main.rs +++ b/nft_ingester/src/bin/slot_checker/main.rs @@ -15,7 +15,8 @@ use indicatif::{ProgressBar, ProgressStyle}; use interface::slots_dumper::SlotsDumper; use metrics_utils::MetricState; use rocks_db::{ - column::TypedColumn, columns::offchain_data::OffChainData, migrator::MigrationVersions, Storage, + column::TypedColumn, columns::offchain_data::OffChainDataDeprecated, + migrator::MigrationVersions, Storage, }; use tokio::{ signal, @@ -113,7 +114,7 @@ async fn main() -> Result<(), Box> { let db = Arc::new( Storage::open_readonly_with_cfs( &args.target_db_path, - vec![RawBlock::NAME, MigrationVersions::NAME, OffChainData::NAME], + vec![RawBlock::NAME, MigrationVersions::NAME, OffChainDataDeprecated::NAME], Arc::new(tokio::sync::Mutex::new(tokio::task::JoinSet::new())), metrics_state.red_metrics, ) diff --git a/nft_ingester/src/config.rs b/nft_ingester/src/config.rs index 22bccfe5..fc2eb56a 100644 --- a/nft_ingester/src/config.rs +++ b/nft_ingester/src/config.rs @@ -70,6 +70,14 @@ pub struct IngesterClapArgs { #[clap(long, env, default_value = "100")] pub parallel_json_downloaders: i32, + #[clap( + long, + env, + default_value = "true", + help = "Skip inline json refreshes if the metadata may be stale" + )] + pub api_skip_inline_json_refresh: Option, + #[clap( long("run-api"), default_value = "true", @@ -506,7 +514,13 @@ pub struct ApiClapArgs { pub json_middleware_config: Option, #[clap(long, env, default_value = "100")] pub parallel_json_downloaders: i32, - + #[clap( + long, + env, + default_value = "true", + help = "Skip inline json refreshes if the metadata may be stale" + )] + pub api_skip_inline_json_refresh: Option, #[clap(long, env, default_value = "info", help = "info|debug")] pub log_level: String, } diff --git a/nft_ingester/src/json_worker.rs b/nft_ingester/src/json_worker.rs index f3418f77..b344b42e 100644 --- a/nft_ingester/src/json_worker.rs +++ 
b/nft_ingester/src/json_worker.rs @@ -17,7 +17,7 @@ use serde_json::Value; use tokio::{ sync::{broadcast::Receiver, mpsc, mpsc::error::TryRecvError, Mutex}, task::JoinSet, - time::{self, Duration, Instant}, + time::{Duration, Instant}, }; use tracing::{debug, error}; use url::Url; @@ -27,12 +27,13 @@ use crate::api::dapi::rpc_asset_convertors::parse_files; pub const JSON_BATCH: usize = 300; pub const WIPE_PERIOD_SEC: u64 = 60; pub const SLEEP_TIME: u64 = 1; -pub const CLIENT_TIMEOUT: u64 = 5; +pub const CLIENT_TIMEOUT: Duration = Duration::from_secs(30); pub struct JsonWorker { pub db_client: Arc, pub rocks_db: Arc, pub num_of_parallel_workers: i32, + pub should_skip_refreshes: bool, pub metrics: Arc, pub red_metrics: Arc, } @@ -44,10 +45,12 @@ impl JsonWorker { metrics: Arc, red_metrics: Arc, parallel_json_downloaders: i32, + should_skip_refreshes: bool, ) -> Self { Self { db_client, num_of_parallel_workers: parallel_json_downloaders, + should_skip_refreshes, metrics, red_metrics, rocks_db, @@ -214,8 +217,9 @@ pub async fn run(json_downloader: Arc, rx: Receiver<()>) { let begin_processing = Instant::now(); - let response = - json_downloader.download_file(task.metadata_url.clone()).await; + let response = json_downloader + .download_file(task.metadata_url.clone(), CLIENT_TIMEOUT) + .await; json_downloader.metrics.set_latency_task_executed( "json_downloader", @@ -251,14 +255,15 @@ pub async fn run(json_downloader: Arc, rx: Receiver<()>) { #[async_trait] impl JsonDownloader for JsonWorker { - async fn download_file(&self, url: String) -> Result { + async fn download_file( + &self, + url: String, + timeout: Duration, + ) -> Result { let start_time = chrono::Utc::now(); - let client = ClientBuilder::new() - .timeout(time::Duration::from_secs(CLIENT_TIMEOUT)) - .build() - .map_err(|e| { - JsonDownloaderError::ErrorDownloading(format!("Failed to create client: {:?}", e)) - })?; + let client = ClientBuilder::new().timeout(timeout).build().map_err(|e| { + JsonDownloaderError::ErrorDownloading(format!("Failed to create client: {:?}", e)) + })?; // Detect if the URL is an IPFS link let parsed_url = if url.starts_with("ipfs://") { @@ -341,6 +346,10 @@ impl JsonDownloader for JsonWorker { }, } } + + fn skip_refresh(&self) -> bool { + self.should_skip_refreshes + } } #[async_trait] diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index fa0a4851..d652ca50 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -1,7 +1,7 @@ #[cfg(test)] #[cfg(feature = "integration_tests")] mod tests { - use std::{collections::HashMap, str::FromStr, sync::Arc}; + use std::{collections::HashMap, str::FromStr, sync::Arc, time::Duration}; use base64::{engine::general_purpose, Engine}; use blockbuster::{ @@ -2308,11 +2308,13 @@ mod tests { "#; let mut mock_middleware = MockJsonDownloader::new(); - mock_middleware.expect_download_file().with(predicate::eq(url)).times(1).returning( - move |_| { + mock_middleware + .expect_download_file() + .with(predicate::eq(url), predicate::eq(Duration::from_secs(3))) + .times(1) + .returning(move |_, _| { Ok(interface::json::JsonDownloadResult::JsonContent(offchain_data.to_string())) - }, - ); + }); let api = nft_ingester::api::api_impl::DasApi::< MaybeProofChecker, From 0e29a1081442d65fe70da99c9252f3e6baae58b3 Mon Sep 17 00:00:00 2001 From: Andrii <18900364+andrii-kl@users.noreply.github.com> Date: Fri, 24 Jan 2025 20:38:14 +0100 Subject: [PATCH 21/33] =?UTF-8?q?MTG-1242=20Fix=20build=20search=20query?= 
=?UTF-8?q?=20for=20Fungible=20tokens.=20MTG-1031=20Fix=20tes=E2=80=A6=20(?= =?UTF-8?q?#379)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * MTG-1242 Fix build search query for Fungible tokens. MTG-1031 Fix test for fungible tokens. * MTG-1242_MTG-1031 Fix issue * MTG-1245 Fixed integration api test for Fungible tokens, fixed related bugs. - Fixed integration test_token_type test. - Fixed show_fungible option for SearchAssets requests. - Removed switching TokenType depending on the show_fungible option. * MTG-1245 Rollback some changes * MTG-1245 Rollback some changes * MTG-1245 fix ftm --- nft_ingester/src/api/dapi/asset.rs | 2 + nft_ingester/src/api/dapi/search_assets.rs | 19 +-- nft_ingester/src/bin/synchronizer/main.rs | 4 +- nft_ingester/src/bin/synchronizer/readme.md | 39 ++++++ nft_ingester/src/index_syncronizer.rs | 78 ++++++------ .../account_based/token_updates_processor.rs | 1 + nft_ingester/tests/api_tests.rs | 111 ++++++++++++------ postgre-client/src/asset_filter_client.rs | 10 ++ tests/setup/src/rocks.rs | 4 + 9 files changed, 191 insertions(+), 77 deletions(-) create mode 100644 nft_ingester/src/bin/synchronizer/readme.md diff --git a/nft_ingester/src/api/dapi/asset.rs b/nft_ingester/src/api/dapi/asset.rs index 965be925..fdd8290e 100644 --- a/nft_ingester/src/api/dapi/asset.rs +++ b/nft_ingester/src/api/dapi/asset.rs @@ -102,6 +102,7 @@ fn convert_rocks_asset_model( token_account: asset_selected_maps.token_accounts.get(asset_pubkey).cloned(), inscription, spl_mint: asset_selected_maps.spl_mints.get(asset_pubkey).cloned(), + token_symbol: token_symbols.get(&asset_pubkey.to_string()).cloned(), token_price: token_prices.get(&asset_pubkey.to_string()).cloned(), }) @@ -193,6 +194,7 @@ pub async fn get_by_ids< let unique_asset_ids: Vec = unique_asset_ids_map.keys().cloned().collect(); let asset_ids_string = asset_ids.clone().into_iter().map(|id| id.to_string()).collect_vec(); + let (token_prices, token_symbols) = if options.show_fungible { let token_prices_fut = token_price_fetcher.fetch_token_prices(asset_ids_string.as_slice()); let token_symbols_fut = diff --git a/nft_ingester/src/api/dapi/search_assets.rs b/nft_ingester/src/api/dapi/search_assets.rs index e4869a8c..7e4fa8d3 100644 --- a/nft_ingester/src/api/dapi/search_assets.rs +++ b/nft_ingester/src/api/dapi/search_assets.rs @@ -108,7 +108,7 @@ async fn fetch_assets< JP: JsonPersister + Sync + Send + 'static, PPC: ProcessingPossibilityChecker + Sync + Send + 'static, >( - index_client: Arc, + pg_index_client: Arc, rocks_db: Arc, filter: SearchAssetsQuery, sort_by: AssetSorting, @@ -145,7 +145,7 @@ async fn fetch_assets< } }; - let keys = index_client + let keys = pg_index_client .get_asset_pubkeys_filtered(filter, &sort_by.into(), limit, page, before, after, &options) .await .map_err(|e| { @@ -206,13 +206,14 @@ async fn fetch_assets< }; let mut grand_total = None; if options.show_grand_total { - grand_total = Some(index_client.get_grand_total(filter, &options).await.map_err(|e| { - if e.to_string().contains("statement timeout") { - StorageError::QueryTimedOut - } else { - StorageError::Common(e.to_string()) - } - })?) + grand_total = + Some(pg_index_client.get_grand_total(filter, &options).await.map_err(|e| { + if e.to_string().contains("statement timeout") { + StorageError::QueryTimedOut + } else { + StorageError::Common(e.to_string()) + } + })?) 
} let resp = AssetList { diff --git a/nft_ingester/src/bin/synchronizer/main.rs b/nft_ingester/src/bin/synchronizer/main.rs index 97af67ab..b89c6f90 100644 --- a/nft_ingester/src/bin/synchronizer/main.rs +++ b/nft_ingester/src/bin/synchronizer/main.rs @@ -43,7 +43,7 @@ pub async fn main() -> Result<(), IngesterError> { red_metrics.register(&mut registry); metrics_utils::utils::start_metrics(registry, args.metrics_port).await; - let index_storage = Arc::new( + let pg_index_storage = Arc::new( init_index_storage_with_migration( &args.pg_database_url, args.pg_max_db_connections, @@ -91,7 +91,7 @@ pub async fn main() -> Result<(), IngesterError> { let synchronizer = Arc::new(Synchronizer::new( rocks_storage.clone(), - index_storage.clone(), + pg_index_storage.clone(), args.dump_synchronizer_batch_size, args.rocks_dump_path.clone(), metrics.clone(), diff --git a/nft_ingester/src/bin/synchronizer/readme.md b/nft_ingester/src/bin/synchronizer/readme.md new file mode 100644 index 00000000..aaa71925 --- /dev/null +++ b/nft_ingester/src/bin/synchronizer/readme.md @@ -0,0 +1,39 @@ +## Building the Project + +Clone the repository and navigate to the project directory: + +```bash +git clone https://github.com/metaplex-foundation/aura.git +cd nft_ingester +``` + +Build the project using Cargo: + +```bash +cargo build --bin synchronizer +``` + +## Running the Service + +Run to see the full list of available arguments: + +```bash +./target/debug/synchronizer -h +``` + +Run Synchronizer with minimum functionality. + +```bash +./target/debug/synchronizer \ + --pg-database-url postgres://solana:solana@localhost:5432/aura_db +``` + + +## Tips for local debugging/testing + +To increase log verbosity, set the log level to debug: +` --log-level debug` + +To fill the local Redis with messages you can use any other Redis that is available. +There is a script that will copy existing messages from one Redis and forward copies of these messages to another one. 
+`nft_ingester/scripts/transfer_redis_messages.py` \ No newline at end of file diff --git a/nft_ingester/src/index_syncronizer.rs b/nft_ingester/src/index_syncronizer.rs index 53e746ff..7fe7c6cd 100644 --- a/nft_ingester/src/index_syncronizer.rs +++ b/nft_ingester/src/index_syncronizer.rs @@ -34,8 +34,8 @@ where T: AssetIndexSourceStorage, U: AssetIndexStorage, { - primary_storage: Arc, - index_storage: Arc, + rocks_primary_storage: Arc, + pg_index_storage: Arc, dump_synchronizer_batch_size: usize, dump_path: String, metrics: Arc, @@ -49,16 +49,16 @@ where { #[allow(clippy::too_many_arguments)] pub fn new( - primary_storage: Arc, - index_storage: Arc, + rocks_primary_storage: Arc, + pg_index_storage: Arc, dump_synchronizer_batch_size: usize, dump_path: String, metrics: Arc, parallel_tasks: usize, ) -> Self { Synchronizer { - primary_storage, - index_storage, + rocks_primary_storage, + pg_index_storage, dump_synchronizer_batch_size, dump_path, metrics, @@ -109,16 +109,16 @@ where run_full_sync_threshold: i64, asset_type: AssetType, ) -> Result { - let last_indexed_key = self.index_storage.fetch_last_synced_id(asset_type).await?; + let last_indexed_key = self.pg_index_storage.fetch_last_synced_id(asset_type).await?; let last_indexed_key = last_indexed_key.map(decode_u64x2_pubkey).transpose()?; // Fetch the last known key from the primary storage let (last_key, prefix) = match asset_type { AssetType::NonFungible => { - (self.primary_storage.last_known_nft_asset_updated_key()?, "nft") + (self.rocks_primary_storage.last_known_nft_asset_updated_key()?, "nft") }, AssetType::Fungible => { - (self.primary_storage.last_known_fungible_asset_updated_key()?, "fungible") + (self.rocks_primary_storage.last_known_fungible_asset_updated_key()?, "fungible") }, }; let Some(last_key) = last_key else { @@ -195,13 +195,17 @@ where let state = self.get_sync_state(run_full_sync_threshold, asset_type).await?; match state { SyncStatus::FullSyncRequired(state) => { - tracing::info!("Should run dump synchronizer as the difference between last indexed and last known sequence is greater than the threshold. Last indexed: {:?}, Last known: {}", state.last_indexed_key.clone().map(|k|k.seq), state.last_known_key.seq); + tracing::warn!("Should run dump synchronizer as the difference between last indexed and last known sequence is greater than the threshold. Last indexed: {:?}, Last known: {}", state.last_indexed_key.clone().map(|k|k.seq), state.last_known_key.seq); self.regular_nft_syncronize(rx, state.last_indexed_key, state.last_known_key).await }, SyncStatus::RegularSyncRequired(state) => { + tracing::debug!("Regular sync required for nft asset"); self.regular_nft_syncronize(rx, state.last_indexed_key, state.last_known_key).await }, - SyncStatus::NoSyncRequired => Ok(()), + SyncStatus::NoSyncRequired => { + tracing::debug!("No sync required for nft asset"); + Ok(()) + }, } } @@ -216,15 +220,19 @@ where match state { SyncStatus::FullSyncRequired(state) => { - tracing::info!("Should run dump synchronizer as the difference between last indexed and last known sequence is greater than the threshold. Last indexed: {:?}, Last known: {}", state.last_indexed_key.clone().map(|k|k.seq), state.last_known_key.seq); + tracing::warn!("Should run dump synchronizer as the difference between last indexed and last known sequence is greater than the threshold. 
Last indexed: {:?}, Last known: {}", state.last_indexed_key.clone().map(|k|k.seq), state.last_known_key.seq); self.regular_fungible_syncronize(rx, state.last_indexed_key, state.last_known_key) .await }, SyncStatus::RegularSyncRequired(state) => { + tracing::debug!("Regular sync required for fungible asset"); self.regular_fungible_syncronize(rx, state.last_indexed_key, state.last_known_key) .await }, - SyncStatus::NoSyncRequired => Ok(()), + SyncStatus::NoSyncRequired => { + tracing::debug!("No sync required for fungible asset"); + Ok(()) + }, } } @@ -234,8 +242,12 @@ where asset_type: AssetType, ) -> Result<(), IngesterError> { let last_known_key = match asset_type { - AssetType::NonFungible => self.primary_storage.last_known_nft_asset_updated_key()?, - AssetType::Fungible => self.primary_storage.last_known_fungible_asset_updated_key()?, + AssetType::NonFungible => { + self.rocks_primary_storage.last_known_nft_asset_updated_key()? + }, + AssetType::Fungible => { + self.rocks_primary_storage.last_known_fungible_asset_updated_key()? + }, }; let Some(last_known_key) = last_known_key else { return Ok(()); @@ -273,7 +285,7 @@ where num_shards: u64, ) -> Result<(), IngesterError> { let base_path = std::path::Path::new(self.dump_path.as_str()); - self.index_storage.destructive_prep_to_batch_nft_load().await?; + self.pg_index_storage.destructive_prep_to_batch_nft_load().await?; let shards = shard_pubkeys(num_shards); type ResultWithPaths = Result<(usize, String, String, String, String), String>; @@ -322,7 +334,7 @@ where let end = *end; let shutdown_rx = rx.resubscribe(); let metrics = self.metrics.clone(); - let rocks_storage = self.primary_storage.clone(); + let rocks_storage = self.rocks_primary_storage.clone(); tasks.spawn_blocking(move || { let res = rocks_storage.dump_nft_csv( assets_file, @@ -350,7 +362,7 @@ where while let Some(task) = tasks.join_next().await { let (_cnt, assets_path, creators_path, authorities_path, metadata_path) = task.map_err(|e| e.to_string())??; - let index_storage = self.index_storage.clone(); + let index_storage = self.pg_index_storage.clone(); let semaphore = semaphore.clone(); index_tasks.spawn(async move { index_storage @@ -368,9 +380,9 @@ where task.map_err(|e| e.to_string())?.map_err(|e| e.to_string())?; } tracing::info!("All NFT assets loads complete. 
Finalizing the batch load"); - self.index_storage.finalize_batch_nft_load().await?; + self.pg_index_storage.finalize_batch_nft_load().await?; tracing::info!("Batch load finalized for NFTs"); - self.index_storage + self.pg_index_storage .update_last_synced_key(last_included_rocks_key, AssetType::NonFungible) .await?; Ok(()) @@ -383,7 +395,7 @@ where num_shards: u64, ) -> Result<(), IngesterError> { let base_path = std::path::Path::new(self.dump_path.as_str()); - self.index_storage.destructive_prep_to_batch_fungible_load().await?; + self.pg_index_storage.destructive_prep_to_batch_fungible_load().await?; let shards = shard_pubkeys(num_shards); let mut tasks: JoinSet> = JoinSet::new(); @@ -405,7 +417,7 @@ where let end = *end; let shutdown_rx = rx.resubscribe(); let metrics = self.metrics.clone(); - let rocks_storage = self.primary_storage.clone(); + let rocks_storage = self.rocks_primary_storage.clone(); tasks.spawn_blocking(move || { let res = rocks_storage.dump_fungible_csv( @@ -423,7 +435,7 @@ where let semaphore = Arc::new(tokio::sync::Semaphore::new(1)); while let Some(task) = tasks.join_next().await { let (_cnt, fungible_tokens_path) = task.map_err(|e| e.to_string())??; - let index_storage = self.index_storage.clone(); + let index_storage = self.pg_index_storage.clone(); let semaphore = semaphore.clone(); index_tasks.spawn(async move { index_storage @@ -435,9 +447,9 @@ where task.map_err(|e| e.to_string())?.map_err(|e| e.to_string())?; } tracing::info!("All token accounts/fungibles loads complete. Finalizing the batch load"); - self.index_storage.finalize_batch_fungible_load().await?; + self.pg_index_storage.finalize_batch_fungible_load().await?; tracing::info!("Batch load finalized for fungibles"); - self.index_storage + self.pg_index_storage .update_last_synced_key(last_included_rocks_key, AssetType::Fungible) .await?; Ok(()) @@ -461,7 +473,7 @@ where break; } let (updated_keys, last_included_key) = - self.primary_storage.fetch_fungible_asset_updated_keys( + self.rocks_primary_storage.fetch_fungible_asset_updated_keys( starting_key.clone(), Some(last_key.clone()), self.dump_synchronizer_batch_size, @@ -482,8 +494,8 @@ where // Update the asset indexes in the index storage // let last_included_key = AssetsUpdateIdx::encode_key(last_included_key); last_included_rocks_key = Some(last_included_key); - let primary_storage = self.primary_storage.clone(); - let index_storage = self.index_storage.clone(); + let primary_storage = self.rocks_primary_storage.clone(); + let index_storage = self.pg_index_storage.clone(); let metrics = self.metrics.clone(); tasks.spawn(async move { Self::syncronize_fungible_batch( @@ -518,7 +530,7 @@ where last_included_rocks_key.slot, last_included_rocks_key.pubkey, ); - self.index_storage + self.pg_index_storage .update_last_synced_key(&last_included_rocks_key, AssetType::Fungible) .await?; } else { @@ -550,7 +562,7 @@ where break; } let (updated_keys, last_included_key) = - self.primary_storage.fetch_nft_asset_updated_keys( + self.rocks_primary_storage.fetch_nft_asset_updated_keys( starting_key.clone(), Some(last_key.clone()), self.dump_synchronizer_batch_size, @@ -571,8 +583,8 @@ where // Update the asset indexes in the index storage // let last_included_key = AssetsUpdateIdx::encode_key(last_included_key); last_included_rocks_key = Some(last_included_key); - let primary_storage = self.primary_storage.clone(); - let index_storage = self.index_storage.clone(); + let primary_storage = self.rocks_primary_storage.clone(); + let index_storage = 
self.pg_index_storage.clone(); let metrics = self.metrics.clone(); tasks.spawn(async move { Self::syncronize_nft_batch( @@ -607,7 +619,7 @@ where last_included_rocks_key.slot, last_included_rocks_key.pubkey, ); - self.index_storage + self.pg_index_storage .update_last_synced_key(&last_included_rocks_key, AssetType::NonFungible) .await?; } else { diff --git a/nft_ingester/src/processors/account_based/token_updates_processor.rs b/nft_ingester/src/processors/account_based/token_updates_processor.rs index 614ad96a..14d170a1 100644 --- a/nft_ingester/src/processors/account_based/token_updates_processor.rs +++ b/nft_ingester/src/processors/account_based/token_updates_processor.rs @@ -143,6 +143,7 @@ impl TokenAccountsProcessor { .map_err(|e| StorageError::Common(e.to_string())) }) .transpose()?; + let asset_dynamic_details = AssetDynamicDetails { pubkey: mint.pubkey, supply: Some(Updated::new( diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index d652ca50..885a238f 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -72,7 +72,7 @@ mod tests { Storage, ToFlatbuffersConverter, }; use serde_json::{json, Value}; - use setup::rocks::RocksTestEnvironment; + use setup::rocks::{RocksTestEnvironment, RocksTestEnvironmentSetup}; use solana_program::pubkey::Pubkey; use solana_sdk::signature::Signature; use spl_pod::{ @@ -92,6 +92,7 @@ mod tests { 6, 155, 136, 87, 254, 171, 129, 132, 251, 104, 127, 99, 70, 24, 192, 53, 218, 196, 57, 220, 26, 235, 59, 85, 152, 160, 240, 0, 0, 0, 0, 1, ]); + #[tokio::test] #[tracing_test::traced_test] async fn test_search_assets() { @@ -2991,11 +2992,11 @@ mod tests { } #[tokio::test(flavor = "multi_thread")] - #[ignore = "FIXME: mismatched number of tokens"] async fn test_token_type() { let cnt = 100; let cli = Cli::default(); let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, 100).await; + let synchronizer = nft_ingester::index_syncronizer::Synchronizer::new( env.rocks_env.storage.clone(), env.pg_env.client.clone(), @@ -3004,7 +3005,7 @@ mod tests { Arc::new(SynchronizerMetricsConfig::new()), 1, ); - let fungible_token_mint1 = generated_assets.pubkeys[0]; // non-existed token + let fungible_token_mint1 = Pubkey::new_unique(); // non-existed token let fungible_token_mint2 = Pubkey::from_str("METAewgxyPbgwsseH8T16a39CQ5VyVxZi9zXiDPY18m").unwrap(); // MPLX token let mint1 = Mint { @@ -3058,7 +3059,27 @@ mod tests { write_version: 10, }; - let ftm_complete = AssetCompleteDetails { + let ftm_complete1 = AssetCompleteDetails { + pubkey: fungible_token_mint1, + static_details: Some(AssetStaticDetails { + pubkey: fungible_token_mint1, + specification_asset_class: SpecificationAssetClass::FungibleAsset, + royalty_target_type: RoyaltyTargetType::Single, + created_at: 10, + edition_address: None, + }), + owner: Some(AssetOwner { + pubkey: fungible_token_mint1, + owner: Updated::new(10, Some(UpdateVersion::WriteVersion(10)), None), + delegate: Default::default(), + owner_type: Default::default(), + owner_delegate_seq: Default::default(), + is_current_owner: Default::default(), + }), + ..Default::default() + }; + + let ftm_complete2 = AssetCompleteDetails { pubkey: fungible_token_mint2, static_details: Some(AssetStaticDetails { pubkey: fungible_token_mint2, @@ -3078,15 +3099,26 @@ mod tests { ..Default::default() }; + env.rocks_env + .storage + .db + .put_cf( + &env.rocks_env.storage.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + fungible_token_mint1, + ftm_complete1.convert_to_fb_bytes(), + ) + 
.unwrap(); + env.rocks_env .storage .db .put_cf( &env.rocks_env.storage.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), fungible_token_mint2, - ftm_complete.convert_to_fb_bytes(), + ftm_complete2.convert_to_fb_bytes(), ) .unwrap(); + let mut batch_storage = BatchSaveStorage::new( env.rocks_env.storage.clone(), 10, @@ -3186,29 +3218,31 @@ mod tests { // so this token contain info about symbol and price // and 1 non-existed token, so response for it do not include such info assert_eq!(res.items.len(), 2); - assert_eq!(res.items[0].clone().token_info.unwrap().symbol.unwrap(), "MPLX".to_string()); - assert_eq!( - res.items[0].clone().token_info.unwrap().associated_token_address.unwrap(), - fungible_token_account2.to_string() - ); - assert_eq!( - res.items[0].clone().token_info.unwrap().price_info.unwrap().currency.unwrap(), - "USDC".to_string() - ); - assert!( - res.items[0].clone().token_info.unwrap().price_info.unwrap().total_price.unwrap() > 0.0 - ); - assert!( - res.items[0].clone().token_info.unwrap().price_info.unwrap().price_per_token.unwrap() - > 0.0 - ); - - assert!(res.items[1].clone().token_info.unwrap().symbol.is_none()); - assert_eq!( - res.items[1].clone().token_info.unwrap().associated_token_address.unwrap(), - fungible_token_account1.to_string() - ); - assert!(res.items[1].clone().token_info.unwrap().price_info.is_none()); + // + // todo MTG-1263 part related to the show_fungible functionality, that shouldn't work for SearchAssets and some work is needed. + // assert_eq!(res.items[0].clone().token_info.unwrap().symbol.unwrap(), "MPLX".to_string()); + // assert_eq!( + // res.items[0].clone().token_info.unwrap().associated_token_address.unwrap(), + // fungible_token_account2.to_string() + // ); + // assert_eq!( + // res.items[0].clone().token_info.unwrap().price_info.unwrap().currency.unwrap(), + // "USDC".to_string() + // ); + // assert!( + // res.items[0].clone().token_info.unwrap().price_info.unwrap().total_price.unwrap() > 0.0 + // ); + // assert!( + // res.items[0].clone().token_info.unwrap().price_info.unwrap().price_per_token.unwrap() + // > 0.0 + // ); + // + // assert!(res.items[1].clone().token_info.unwrap().symbol.is_none()); + // assert!(res.items[1].clone().token_info.unwrap().price_info.is_none()); + // assert_eq!( + // res.items[1].clone().token_info.unwrap().associated_token_address.unwrap(), + // fungible_token_account1.to_string() + // ); let payload = SearchAssets { limit: Some(1000), @@ -3226,7 +3260,7 @@ mod tests { // We have 1 NonFungible token, created in setup::TestEnvironment::create fn assert_eq!(res.items.len(), 1); - assert!(res.items[0].token_info.is_none()); + // assert!(res.items[0].token_info.is_none()); let payload = SearchAssets { limit: Some(1000), @@ -3261,7 +3295,7 @@ mod tests { // Our NonFungible token is not compressed assert_eq!(res.items.len(), 1); - assert!(res.items[0].token_info.is_none()); + // assert!(res.items[0].token_info.is_none()); let payload = SearchAssets { limit: Some(1000), @@ -3278,7 +3312,7 @@ mod tests { let res = api.search_assets(payload, mutexed_tasks.clone()).await.unwrap(); let res: AssetList = serde_json::from_value(res).unwrap(); - // Totally we have 3 assets with required owner + // Totally we have 3 assets with required owner. show_fungible is false by default, so we don't have token info. 
assert_eq!(res.items.len(), 3); assert!(res.items[0].mint_extensions.is_none()); assert!(res.items[1].mint_extensions.is_none()); @@ -3438,11 +3472,22 @@ mod tests { } #[tokio::test(flavor = "multi_thread")] - #[ignore = "FIXME: search_assets result returns 0 items"] async fn test_writing_fungible_into_dedicated_table() { let cnt = 100; let cli = Cli::default(); - let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, 100).await; + + let (env, generated_assets) = setup::TestEnvironment::create_and_setup_from_closures( + &cli, + cnt, + 100, + RocksTestEnvironmentSetup::static_data_for_fungible, + RocksTestEnvironmentSetup::with_authority, + RocksTestEnvironmentSetup::test_owner, + RocksTestEnvironmentSetup::dynamic_data, + RocksTestEnvironmentSetup::collection_without_authority, + ) + .await; + let synchronizer = nft_ingester::index_syncronizer::Synchronizer::new( env.rocks_env.storage.clone(), env.pg_env.client.clone(), diff --git a/postgre-client/src/asset_filter_client.rs b/postgre-client/src/asset_filter_client.rs index 68177473..ae9f252f 100644 --- a/postgre-client/src/asset_filter_client.rs +++ b/postgre-client/src/asset_filter_client.rs @@ -209,6 +209,15 @@ fn add_filter_clause<'a>( group_clause_required = true; } + if let Some(ref token_type) = filter.token_type { + if token_type == &TokenType::Fungible && filter.owner_address.is_some() { + query_builder.push( + " INNER JOIN fungible_tokens ON assets_v3.ast_pubkey = fungible_tokens.fbt_asset ", + ); + group_clause_required = true; + } + } + // todo: if we implement the additional params like negata and all/any switch, the true part and the AND prefix should be refactored query_builder.push(" WHERE TRUE "); if let Some(spec_version) = &filter.specification_version { @@ -463,6 +472,7 @@ impl AssetPubkeyFilteredFetcher for PgClient { ) -> Result, IndexDbError> { let (mut query_builder, order_reversed) = Self::build_search_query(filter, order, limit, page, before, after, options)?; + let query = query_builder.build_query_as::(); debug!("SEARCH QUERY: {}", &query.sql()); let start_time = chrono::Utc::now(); diff --git a/tests/setup/src/rocks.rs b/tests/setup/src/rocks.rs index 0f27b0c3..8b2fd08d 100644 --- a/tests/setup/src/rocks.rs +++ b/tests/setup/src/rocks.rs @@ -196,6 +196,10 @@ impl RocksTestEnvironmentSetup { Self::generate_static_data(pubkeys, slot, SpecificationAssetClass::Nft) } + pub fn static_data_for_fungible(pubkeys: &[Pubkey], slot: u64) -> Vec { + Self::generate_static_data(pubkeys, slot, SpecificationAssetClass::FungibleToken) + } + fn generate_static_data( pubkeys: &[Pubkey], slot: u64, From 1adc382ee9f90b74261cf7ba46d44f25728aaa1c Mon Sep 17 00:00:00 2001 From: Stanislav Cherviakov Date: Mon, 27 Jan 2025 17:30:31 +0000 Subject: [PATCH 22/33] hotfix for migrations when the key encoding changes (#386) --- rocks-db/src/migrations/clitems_v2.rs | 3 +- rocks-db/src/migrations/offchain_data.rs | 1 + rocks-db/src/migrator.rs | 11 ++- rocks-db/tests/migration_tests.rs | 95 +++++++++++++++++++++++- 4 files changed, 104 insertions(+), 6 deletions(-) diff --git a/rocks-db/src/migrations/clitems_v2.rs b/rocks-db/src/migrations/clitems_v2.rs index da66ae1e..82ba6c5c 100644 --- a/rocks-db/src/migrations/clitems_v2.rs +++ b/rocks-db/src/migrations/clitems_v2.rs @@ -1,7 +1,7 @@ use entities::models::Updated; use crate::{ - columns::cl_items::{ClItemDeprecated, ClItemV2}, + columns::cl_items::{ClItemDeprecated, ClItemKey, ClItemV2}, migrator::{RocksMigration, SerializationType}, }; @@ -10,6 +10,7 @@ impl RocksMigration 
for ClItemsV2Migration { const VERSION: u64 = 5; const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; const SERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; + type KeyType = ClItemKey; type NewDataType = ClItemV2; type OldDataType = ClItemDeprecated; } diff --git a/rocks-db/src/migrations/offchain_data.rs b/rocks-db/src/migrations/offchain_data.rs index 21db0d9b..dc6a51d0 100644 --- a/rocks-db/src/migrations/offchain_data.rs +++ b/rocks-db/src/migrations/offchain_data.rs @@ -38,6 +38,7 @@ impl RocksMigration for OffChainDataMigration { const VERSION: u64 = 4; const DESERIALIZATION_TYPE: SerializationType = SerializationType::Bincode; const SERIALIZATION_TYPE: SerializationType = SerializationType::Flatbuffers; + type KeyType = String; type NewDataType = OffChainData; type OldDataType = OffChainDataDeprecated; } diff --git a/rocks-db/src/migrator.rs b/rocks-db/src/migrator.rs index da4e4b2d..ce02836b 100644 --- a/rocks-db/src/migrator.rs +++ b/rocks-db/src/migrator.rs @@ -38,12 +38,17 @@ pub trait RocksMigration { const VERSION: u64; const DESERIALIZATION_TYPE: SerializationType; const SERIALIZATION_TYPE: SerializationType; - type NewDataType: Sync + Serialize + DeserializeOwned + Send + TypedColumn; + type KeyType: 'static + Hash + Eq + std::fmt::Debug; + type NewDataType: Sync + + Serialize + + DeserializeOwned + + Send + + TypedColumn; type OldDataType: Sync + Serialize + DeserializeOwned + Send - + TypedColumn + + TypedColumn + Into<::ValueType>; } @@ -242,7 +247,7 @@ impl<'a> MigrationApplier<'a> { { let mut batch = HashMap::new(); for (key, value) in Self::migration_column_iter::(&temporary_migration_storage.db)? { - let key_decoded = match column.decode_key(key.to_vec()) { + let key_decoded = match M::OldDataType::decode_key(key.to_vec()) { Ok(key_decoded) => key_decoded, Err(e) => { error!("migration data decode_key: {:?}, {}", key.to_vec(), e); diff --git a/rocks-db/tests/migration_tests.rs b/rocks-db/tests/migration_tests.rs index b2d9aec9..d18eb14c 100644 --- a/rocks-db/tests/migration_tests.rs +++ b/rocks-db/tests/migration_tests.rs @@ -1,14 +1,19 @@ #[cfg(test)] mod tests { - use std::sync::Arc; + use std::{str::FromStr, sync::Arc}; + use entities::models::Updated; use metrics_utils::red::RequestErrorDurationMetrics; use rocks_db::{ column::TypedColumn, - columns::offchain_data::{OffChainData, OffChainDataDeprecated}, + columns::{ + cl_items::{ClItemDeprecated, ClItemKey, ClItemV2}, + offchain_data::{OffChainData, OffChainDataDeprecated}, + }, migrator::MigrationState, Storage, }; + use solana_sdk::pubkey::Pubkey; use tempfile::TempDir; use tokio::{sync::Mutex, task::JoinSet}; @@ -112,6 +117,92 @@ mod tests { assert_eq!(migrated_v2.last_read_at, 0); } + #[tokio::test] + async fn test_clitems_v2_migration() { + let dir = TempDir::new().unwrap(); + let node_id = 32782; + let tree_id = Pubkey::from_str("6EdzmXrunmS1gqkuWzDuP94o1YPNc2cb8z45G1eQaMpp") + .expect("a valid pubkey"); + let hash = [ + 93, 208, 232, 135, 101, 117, 109, 249, 149, 77, 57, 114, 173, 168, 145, 196, 185, 190, + 21, 121, 205, 253, 143, 155, 82, 119, 9, 143, 73, 176, 233, 179, + ] + .to_vec(); + let seq = 32; + let v1 = ClItemDeprecated { + cli_node_idx: node_id, + cli_tree_key: tree_id, + cli_leaf_idx: None, + cli_seq: seq, + cli_level: 2, + cli_hash: hash.clone(), + slot_updated: 239021690, + }; + let key = ClItemKey::new(node_id, tree_id); + let path = dir.path().to_str().unwrap(); + let old_storage = Storage::open( + path, + Arc::new(Mutex::new(JoinSet::new())), + 
Arc::new(RequestErrorDurationMetrics::new()), + MigrationState::Version(0), + ) + .unwrap(); + old_storage.cl_items_deprecated.put(key.clone(), v1.clone()).expect("should put"); + drop(old_storage); + let secondary_storage_dir = TempDir::new().unwrap(); + let migration_version_manager = Storage::open_secondary( + path, + secondary_storage_dir.path().to_str().unwrap(), + Arc::new(Mutex::new(JoinSet::new())), + Arc::new(RequestErrorDurationMetrics::new()), + MigrationState::Version(4), + ) + .unwrap(); + let binding = TempDir::new().unwrap(); + let migration_storage_path = binding.path().to_str().unwrap(); + Storage::apply_all_migrations( + path, + migration_storage_path, + Arc::new(migration_version_manager), + ) + .await + .unwrap(); + + let new_storage = Storage::open( + path, + Arc::new(Mutex::new(JoinSet::new())), + Arc::new(RequestErrorDurationMetrics::new()), + MigrationState::Version(4), + ) + .unwrap(); + let migrated_v1 = new_storage + .db + .get_pinned_cf( + &new_storage.db.cf_handle(ClItemV2::NAME).unwrap(), + ClItemV2::encode_key(key.clone()), + ) + .expect("expect to get value successfully") + .expect("value to be present"); + + print!("migrated is {:?}", migrated_v1.to_vec()); + let migrated_v1 = new_storage + .cl_items + .get(key.clone()) + .expect("should get value successfully") + .expect("the value should be not empty"); + assert_eq!(migrated_v1.finalized_hash, None); + assert_eq!(migrated_v1.leaf_idx, None); + assert_eq!(migrated_v1.level, 2); + assert_eq!( + migrated_v1.pending_hash, + Some(Updated::new( + 239021690, + Some(entities::models::UpdateVersion::Sequence(seq)), + hash + )) + ); + } + #[test] #[ignore = "TODO: test migrations on relevant columns"] fn test_merge_fail() { From 9724ecf38810915c03c8c1f55da3ad9dfdf6a5d2 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Mon, 27 Jan 2025 22:13:55 +0100 Subject: [PATCH 23/33] MTG-1028 Add integration tests for freeze authorities & delegate authorities - cover freeze authorities and delegate authorities with integration tests. - implemented the ability to generate more diverse data for integration tests. 
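Editor's note: the bulk of this patch is mechanical. Every test in `api_tests.rs` previously constructed a `DasApi` by hand, and those call sites collapse into a shared `create_api` helper. The helper's definition is not visible in the hunks below, so the following is a minimal sketch of its assumed shape, reconstructed from the `DasApi::new` call sites it replaces; the generic parameters and the treatment of the optional JSON middleware config are assumptions, not the patch's literal code.

```rust
// Sketch only: assumed shape of the `create_api` test helper, reconstructed
// from the removed `DasApi::new` call sites. Relies on the imports already
// present in api_tests.rs (DasApi, ApiMetricsConfig, MockAccountBalanceGetter,
// RaydiumTokenPriceFetcher, NATIVE_MINT_PUBKEY, etc.).
fn create_api(
    env: &setup::TestEnvironment,
    json_middleware_config: Option<JsonMiddlewareConfig>,
) -> nft_ingester::api::api_impl::DasApi<
    MaybeProofChecker,
    JsonWorker,
    JsonWorker,
    MockAccountBalanceGetter,
    RaydiumTokenPriceFetcher,
    Storage,
> {
    nft_ingester::api::api_impl::DasApi::new(
        env.pg_env.client.clone(),
        env.rocks_env.storage.clone(),
        Arc::new(ApiMetricsConfig::new()),
        None,
        None,
        50,
        None,
        None,
        // The only per-test knob: falls back to the disabled default config,
        // matching the `JsonMiddlewareConfig::default()` at the old call sites.
        json_middleware_config.unwrap_or_default(),
        Arc::new(MockAccountBalanceGetter::new()),
        None,
        Arc::new(RaydiumTokenPriceFetcher::default()),
        NATIVE_MINT_PUBKEY.to_string(),
    )
}
```

Call sites then shrink to `let api = create_api(&env, None);`, and the JSON-middleware test passes `Some(JsonMiddlewareConfig { is_enabled: true, max_urls_to_parse: 10 })`; how that test threads its mocked JSON downloader through the helper is not shown in these hunks. The "more diverse data" mentioned above comes from `create_and_setup_from_closures` now taking a `&[SpecificationAssetClass]` slice instead of a single static-data closure, visible in the new-test hunks below and the `tests/setup` entries in the diffstat.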
--- nft_ingester/tests/api_tests.rs | 806 +++++++++++-------------------- nft_ingester/tests/dump_tests.rs | 15 +- tests/setup/src/lib.rs | 38 +- tests/setup/src/rocks.rs | 96 +++- 4 files changed, 407 insertions(+), 548 deletions(-) diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index 885a238f..bd75b771 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -99,28 +99,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let limit = 10; @@ -472,28 +451,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -611,28 +569,7 @@ mod tests { async fn test_metadata_sanitizer() { let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, 0, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -760,28 +697,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -876,28 +792,8 @@ mod tests { let cnt = 20; let cli = 
Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -1043,28 +939,8 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -1212,28 +1088,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -1365,28 +1220,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let first_tree = Pubkey::new_unique(); let second_tree = Pubkey::new_unique(); @@ -1578,28 +1412,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - 
Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let token_updates_processor = TokenAccountsProcessor::new(Arc::new(IngesterMetricsConfig::new())); @@ -1793,28 +1606,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let token_updates_processor = TokenAccountsProcessor::new(Arc::new(IngesterMetricsConfig::new())); @@ -2039,28 +1831,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -2096,28 +1867,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -2154,28 +1904,7 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -2212,28 +1941,8 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, generated_assets) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - 
JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); @@ -2317,29 +2026,10 @@ mod tests { Ok(interface::json::JsonDownloadResult::JsonContent(offchain_data.to_string())) }); - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - MockJsonDownloader, - MockJsonPersister, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - Some(Arc::new(mock_middleware)), - None, - JsonMiddlewareConfig { is_enabled: true, max_urls_to_parse: 10 }, - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), + let api = create_api( + &env, + Some(JsonMiddlewareConfig { is_enabled: true, max_urls_to_parse: 10 }), ); - let pb = Pubkey::new_unique(); let authority = Pubkey::new_unique(); @@ -2461,28 +2151,8 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - Some(env.rocks_env.storage.clone()), - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + + let api = create_api(&env, None); let asset_id = Pubkey::new_unique(); let tree_id = Pubkey::new_unique(); env.rocks_env @@ -2514,28 +2184,8 @@ mod tests { let cnt = 20; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + + let api = create_api(&env, None); let asset_fees_count = 1000; let mut asset_ids = Vec::with_capacity(asset_fees_count); for _ in 0..asset_fees_count { @@ -2581,28 +2231,8 @@ mod tests { let total_assets = 2000; let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, total_assets, SLOT_UPDATED).await; - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - 
Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = SearchAssets { @@ -2662,28 +2292,8 @@ mod tests { mock_account_balance_getter .expect_get_account_balance_lamports() .returning(move |_| Ok(10_u64.pow(9))); - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(mock_account_balance_getter), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = SearchAssets { @@ -2786,28 +2396,7 @@ mod tests { ); o.await.unwrap(); - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = GetAsset { @@ -2937,28 +2526,7 @@ mod tests { write_version: 1000, }).unwrap(); - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = GetAsset { @@ -2991,6 +2559,221 @@ mod tests { assert_eq!(res.spl20, None); } + #[tokio::test(flavor = "multi_thread")] + async fn test_search_assets_get_all_spec_classes() { + let tasks = JoinSet::new(); + let mutexed_tasks = Arc::new(Mutex::new(tasks)); + + let number_items = 100; + let cli = Cli::default(); + + let (env, generated_assets) = setup::TestEnvironment::create_and_setup_from_closures( + &cli, + number_items, + 100, + &[ + SpecificationAssetClass::Unknown, + SpecificationAssetClass::ProgrammableNft, + SpecificationAssetClass::Nft, + SpecificationAssetClass::FungibleAsset, + SpecificationAssetClass::FungibleToken, + SpecificationAssetClass::MplCoreCollection, + SpecificationAssetClass::MplCoreAsset, + ], + RocksTestEnvironmentSetup::with_authority, + RocksTestEnvironmentSetup::test_one_owner, + RocksTestEnvironmentSetup::dynamic_data, + RocksTestEnvironmentSetup::collection_without_authority, + ) + .await; + + let api = create_api(&env, None); + let owner = generated_assets.owners[0].owner.value.unwrap(); + + let synchronizer = nft_ingester::index_syncronizer::Synchronizer::new( + 
env.rocks_env.storage.clone(), + env.pg_env.client.clone(), + 200_000, + "".to_string(), + Arc::new(SynchronizerMetricsConfig::new()), + 1, + ); + + let payload = SearchAssets { + limit: Some(1000), + page: Some(1), + owner_address: Some(owner.to_string()), + options: SearchAssetsOptions { + show_unverified_collections: true, + ..Default::default() + }, + token_type: Some(TokenType::All), + ..Default::default() + }; + let res = api.search_assets(payload, mutexed_tasks.clone()).await.unwrap(); + let res: AssetList = serde_json::from_value(res).unwrap(); + + assert_eq!(res.items.len(), number_items); + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_search_assets_freeze_delegate_authorities_for_fungible() { + let tasks = JoinSet::new(); + let mutexed_tasks = Arc::new(Mutex::new(tasks)); + + let number_items = 100; + let cli = Cli::default(); + + let (env, generated_assets) = + setup::TestEnvironment::create_noise(&cli, number_items, 100).await; + + let fungible_token_mint = Pubkey::new_unique(); + let mint_authority = Pubkey::new_unique(); + let freeze_authority = Pubkey::new_unique(); + let owner = Pubkey::new_unique(); + + let mint = Mint { + pubkey: fungible_token_mint, + supply: 100000, + decimals: 2, + mint_authority: Some(mint_authority.clone()), + freeze_authority: Some(freeze_authority), + token_program: Default::default(), + slot_updated: 10, + write_version: 10, + extensions: None, + }; + + let fungible_token_account = Pubkey::new_unique(); + let delegate = Pubkey::new_unique(); + + let token_account = TokenAccount { + pubkey: fungible_token_account, + mint: fungible_token_mint, + delegate: Some(delegate.clone()), + owner, + extensions: None, + frozen: false, + delegated_amount: 0, + slot_updated: 10, + amount: 0, + write_version: 10, + }; + + let ftm_complete = AssetCompleteDetails { + pubkey: fungible_token_mint, + static_details: Some(AssetStaticDetails { + pubkey: fungible_token_mint, + specification_asset_class: SpecificationAssetClass::FungibleAsset, + royalty_target_type: RoyaltyTargetType::Single, + created_at: 10, + edition_address: None, + }), + owner: Some(AssetOwner { + pubkey: fungible_token_mint, + owner: Updated::new(10, Some(UpdateVersion::WriteVersion(10)), None), + delegate: Default::default(), + owner_type: Default::default(), + owner_delegate_seq: Default::default(), + is_current_owner: Updated::new(12, Some(UpdateVersion::Sequence(12)), true), + }), + ..Default::default() + }; + + env.rocks_env + .storage + .db + .put_cf( + &env.rocks_env.storage.db.cf_handle(AssetCompleteDetails::NAME).unwrap(), + fungible_token_mint, + ftm_complete.convert_to_fb_bytes(), + ) + .unwrap(); + + let mut batch_storage = BatchSaveStorage::new( + env.rocks_env.storage.clone(), + 10, + Arc::new(IngesterMetricsConfig::new()), + ); + + let token_accounts_processor = + TokenAccountsProcessor::new(Arc::new(IngesterMetricsConfig::new())); + + token_accounts_processor + .transform_and_save_fungible_token_account( + &mut batch_storage, + fungible_token_account, + &token_account, + ) + .unwrap(); + + token_accounts_processor + .transform_and_save_mint_account(&mut batch_storage, &mint) + .unwrap(); + batch_storage.flush().unwrap(); + let (_, rx) = tokio::sync::broadcast::channel::<()>(1); + + let synchronizer = nft_ingester::index_syncronizer::Synchronizer::new( + env.rocks_env.storage.clone(), + env.pg_env.client.clone(), + 200_000, + "".to_string(), + Arc::new(SynchronizerMetricsConfig::new()), + 1, + ); + let synchronizer = Arc::new(synchronizer); + let mut tasks = 
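+        // One synchronizer task is spawned per `AssetType` below, so both the
+        // fungible and the non-fungible asset indexes are synchronized to
+        // Postgres from the freshly flushed Rocks data, and all tasks are
+        // joined before the search query runs.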
JoinSet::new(); + + for asset_type in ASSET_TYPES { + let rx = rx.resubscribe(); + let synchronizer = synchronizer.clone(); + match asset_type { + AssetType::Fungible => { + tasks.spawn(async move { + synchronizer.synchronize_fungible_asset_indexes(&rx, 0).await + }); + }, + AssetType::NonFungible => { + tasks.spawn( + async move { synchronizer.synchronize_nft_asset_indexes(&rx, 0).await }, + ); + }, + } + } + while let Some(res) = tasks.join_next().await { + if let Err(err) = res { + panic!("{err}"); + } + } + + let payload = SearchAssets { + limit: Some(1000), + page: Some(1), + owner_address: Some(owner.to_string()), + options: SearchAssetsOptions { + show_zero_balance: true, + show_unverified_collections: true, + ..Default::default() + }, + token_type: Some(TokenType::Fungible), + ..Default::default() + }; + + let api = create_api(&env, None); + let res = api.search_assets(payload, mutexed_tasks.clone()).await.unwrap(); + let res: AssetList = serde_json::from_value(res).unwrap(); + + assert_eq!(res.items.len(), 1); + assert_eq!( + res.items[0].clone().token_info.unwrap().mint_authority.unwrap(), + mint_authority.to_string() + ); + assert_eq!( + res.items[0].clone().token_info.unwrap().freeze_authority.unwrap(), + freeze_authority.to_string() + ); + } + #[tokio::test(flavor = "multi_thread")] async fn test_token_type() { let cnt = 100; @@ -3175,28 +2958,7 @@ mod tests { } } - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = SearchAssets { @@ -3413,28 +3175,7 @@ mod tests { .unwrap(); batch_storage.flush().unwrap(); - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = GetAsset { @@ -3480,7 +3221,7 @@ mod tests { &cli, cnt, 100, - RocksTestEnvironmentSetup::static_data_for_fungible, + &[SpecificationAssetClass::FungibleToken], RocksTestEnvironmentSetup::with_authority, RocksTestEnvironmentSetup::test_owner, RocksTestEnvironmentSetup::dynamic_data, @@ -3605,28 +3346,7 @@ mod tests { } } - let api = nft_ingester::api::api_impl::DasApi::< - MaybeProofChecker, - JsonWorker, - JsonWorker, - MockAccountBalanceGetter, - RaydiumTokenPriceFetcher, - Storage, - >::new( - env.pg_env.client.clone(), - env.rocks_env.storage.clone(), - Arc::new(ApiMetricsConfig::new()), - None, - None, - 50, - None, - None, - JsonMiddlewareConfig::default(), - Arc::new(MockAccountBalanceGetter::new()), - None, - Arc::new(RaydiumTokenPriceFetcher::default()), - NATIVE_MINT_PUBKEY.to_string(), - ); + let api = 
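+        // `create_api` is the helper introduced at the bottom of this module;
+        // `None` keeps the default `JsonMiddlewareConfig`, replacing the
+        // thirteen-argument `DasApi::new` call removed above.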
create_api(&env, None); let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = SearchAssets { @@ -3916,4 +3636,32 @@ mod tests { assert_eq!(idx_fungible_asset_iter.count(), 6); assert_eq!(idx_non_fungible_asset_iter.count(), 6); } + + fn create_api( + env: &setup::TestEnvironment, + json_middleware_config: Option, + ) -> nft_ingester::api::api_impl::DasApi< + MaybeProofChecker, + JsonWorker, + JsonWorker, + MockAccountBalanceGetter, + RaydiumTokenPriceFetcher, + Storage, + > { + nft_ingester::api::api_impl::DasApi::new( + env.pg_env.client.clone(), + env.rocks_env.storage.clone(), + Arc::new(ApiMetricsConfig::new()), + None, + None, + 50, + None, + None, + json_middleware_config.unwrap_or(JsonMiddlewareConfig::default()), + Arc::new(MockAccountBalanceGetter::new()), + None, + Arc::new(RaydiumTokenPriceFetcher::default()), + NATIVE_MINT_PUBKEY.to_string(), + ) + } } diff --git a/nft_ingester/tests/dump_tests.rs b/nft_ingester/tests/dump_tests.rs index 9d8e3393..c31a82b9 100644 --- a/nft_ingester/tests/dump_tests.rs +++ b/nft_ingester/tests/dump_tests.rs @@ -123,7 +123,10 @@ mod tests { mod mtg_441_tests { use std::sync::Arc; - use entities::api_req_params::{GetAsset, Options}; + use entities::{ + api_req_params::{GetAsset, Options}, + enums::SpecificationAssetClass, + }; use interface::account_balance::MockAccountBalanceGetter; use metrics_utils::ApiMetricsConfig; use nft_ingester::{ @@ -189,7 +192,7 @@ mod mtg_441_tests { &cli, 20, SLOT_UPDATED, - RocksTestEnvironmentSetup::static_data_for_mpl, + &[SpecificationAssetClass::MplCoreAsset], RocksTestEnvironmentSetup::without_authority, RocksTestEnvironmentSetup::test_owner, RocksTestEnvironmentSetup::dynamic_data, @@ -225,7 +228,7 @@ mod mtg_441_tests { &cli, 20, SLOT_UPDATED, - RocksTestEnvironmentSetup::static_data_for_mpl, + &[SpecificationAssetClass::MplCoreAsset], RocksTestEnvironmentSetup::with_authority, RocksTestEnvironmentSetup::test_owner, RocksTestEnvironmentSetup::dynamic_data, @@ -260,7 +263,7 @@ mod mtg_441_tests { &cli, 20, SLOT_UPDATED, - RocksTestEnvironmentSetup::static_data_for_mpl, + &[SpecificationAssetClass::MplCoreAsset], RocksTestEnvironmentSetup::with_authority, RocksTestEnvironmentSetup::test_owner, RocksTestEnvironmentSetup::dynamic_data, @@ -291,11 +294,11 @@ mod mtg_441_tests { #[tracing_test::traced_test] async fn authority_none_collection_authority_none() { let cli = Cli::default(); - let (env, generated_assets) = setup::TestEnvironment::create_and_setup_from_closures( + let (env, generated_assets) = TestEnvironment::create_and_setup_from_closures( &cli, 20, SLOT_UPDATED, - RocksTestEnvironmentSetup::static_data_for_mpl, + &[SpecificationAssetClass::MplCoreAsset], RocksTestEnvironmentSetup::without_authority, RocksTestEnvironmentSetup::test_owner, RocksTestEnvironmentSetup::dynamic_data, diff --git a/tests/setup/src/lib.rs b/tests/setup/src/lib.rs index 927f1232..18b9f684 100644 --- a/tests/setup/src/lib.rs +++ b/tests/setup/src/lib.rs @@ -4,11 +4,9 @@ pub mod rocks; use std::sync::Arc; -use entities::enums::{AssetType, ASSET_TYPES}; +use entities::enums::{AssetType, SpecificationAssetClass, ASSET_TYPES}; use metrics_utils::MetricsTrait; -use rocks_db::columns::asset::{ - AssetAuthority, AssetCollection, AssetDynamicDetails, AssetOwner, AssetStaticDetails, -}; +use rocks_db::columns::asset::{AssetAuthority, AssetCollection, AssetDynamicDetails, AssetOwner}; use solana_sdk::pubkey::Pubkey; use testcontainers::clients::Cli; use tokio::task::JoinSet; @@ -30,7 +28,33 @@ impl<'a> 
TestEnvironment<'a> { cli, cnt, slot, - RocksTestEnvironmentSetup::static_data_for_nft, + &[SpecificationAssetClass::Nft], + RocksTestEnvironmentSetup::with_authority, + RocksTestEnvironmentSetup::test_owner, + RocksTestEnvironmentSetup::dynamic_data, + RocksTestEnvironmentSetup::collection_without_authority, + ) + .await + } + + pub async fn create_noise( + cli: &'a Cli, + cnt: usize, + slot: u64, + ) -> (TestEnvironment<'a>, rocks::GeneratedAssets) { + Self::create_and_setup_from_closures( + cli, + cnt, + slot, + &[ + SpecificationAssetClass::Unknown, + SpecificationAssetClass::ProgrammableNft, + SpecificationAssetClass::Nft, + SpecificationAssetClass::FungibleAsset, + SpecificationAssetClass::FungibleToken, + SpecificationAssetClass::MplCoreCollection, + SpecificationAssetClass::MplCoreAsset, + ], RocksTestEnvironmentSetup::with_authority, RocksTestEnvironmentSetup::test_owner, RocksTestEnvironmentSetup::dynamic_data, @@ -44,7 +68,7 @@ impl<'a> TestEnvironment<'a> { cli: &'a Cli, cnt: usize, slot: u64, - static_details: fn(&[Pubkey], u64) -> Vec, + spec_asset_class_list: &[SpecificationAssetClass], authorities: fn(&[Pubkey]) -> Vec, owners: fn(&[Pubkey]) -> Vec, dynamic_details: fn(&[Pubkey], u64) -> Vec, @@ -57,7 +81,7 @@ impl<'a> TestEnvironment<'a> { .generate_from_closure( cnt, slot, - static_details, + spec_asset_class_list, authorities, owners, dynamic_details, diff --git a/tests/setup/src/rocks.rs b/tests/setup/src/rocks.rs index 8b2fd08d..cc1af8fb 100644 --- a/tests/setup/src/rocks.rs +++ b/tests/setup/src/rocks.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use entities::{enums::SpecificationAssetClass, models::Updated}; use metrics_utils::red::RequestErrorDurationMetrics; -use rand::{random, Rng}; +use rand::{random, seq::SliceRandom, Rng}; use rocks_db::{ column::TypedColumn, columns::{ @@ -78,14 +78,21 @@ impl RocksTestEnvironment { &self, cnt: usize, slot: u64, - static_details: fn(&[Pubkey], u64) -> Vec, + spec_asset_class_list: &[SpecificationAssetClass], authorities: fn(&[Pubkey]) -> Vec, owners: fn(&[Pubkey]) -> Vec, dynamic_details: fn(&[Pubkey], u64) -> Vec, collections: fn(&[Pubkey]) -> Vec, ) -> GeneratedAssets { let pubkeys = (0..cnt).map(|_| self.generate_and_store_pubkey(slot)).collect::>(); - let static_details = static_details(&pubkeys, slot); + let static_details = + spec_asset_class_list.iter().cycle().take(cnt).cloned().collect::>(); + + let asset_static_details = RocksTestEnvironmentSetup::generate_static_data_with_asset_list( + &pubkeys, + cnt as u64, + &static_details, + ); let authorities = authorities(&pubkeys); let owners = owners(&pubkeys); let dynamic_details = dynamic_details(&pubkeys, slot); @@ -93,7 +100,7 @@ impl RocksTestEnvironment { let assets = GeneratedAssets { pubkeys, - static_details, + static_details: asset_static_details, authorities, owners, dynamic_details, @@ -131,6 +138,44 @@ impl RocksTestEnvironment { assets } + /// spec_asset_class_list: list of available Asset Classes that generator will use for generated data. 
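+    ///
+    /// Note: classes are drawn with `choose_multiple`, which samples without
+    /// replacement, so when `cnt` exceeds the list length only
+    /// `spec_asset_class_list.len()` assets end up with static details.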
+ pub async fn generate_assets_with_specification_classes( + &self, + cnt: usize, + slot: u64, + spec_asset_class_list: Vec, + ) -> GeneratedAssets { + let mut rng = rand::thread_rng(); + let pubkeys = (0..cnt).map(|_| self.generate_and_store_pubkey(slot)).collect::>(); + let static_details = + spec_asset_class_list.choose_multiple(&mut rng, cnt).cloned().collect::>(); + + let static_details = RocksTestEnvironmentSetup::static_data_for_different_types( + &pubkeys, + slot, + &static_details, + ); + let authorities = RocksTestEnvironmentSetup::with_authority(&pubkeys); + let owners = RocksTestEnvironmentSetup::test_owner(&pubkeys); + let dynamic_details = RocksTestEnvironmentSetup::dynamic_data(&pubkeys, slot); + let collections = RocksTestEnvironmentSetup::collection_without_authority(&pubkeys); + + let assets = GeneratedAssets { + pubkeys, + static_details, + authorities, + owners, + dynamic_details, + collections, + }; + + self.put_everything_in_the_database(&assets) + .await + .expect("Cannot store 'GeneratedAssets' into storage."); + + assets + } + fn generate_and_store_pubkey(&self, slot: u64) -> Pubkey { let pubkey = Pubkey::new_unique(); self.storage.asset_updated(slot, pubkey).expect("Cannot update assets."); @@ -196,8 +241,12 @@ impl RocksTestEnvironmentSetup { Self::generate_static_data(pubkeys, slot, SpecificationAssetClass::Nft) } - pub fn static_data_for_fungible(pubkeys: &[Pubkey], slot: u64) -> Vec { - Self::generate_static_data(pubkeys, slot, SpecificationAssetClass::FungibleToken) + pub fn static_data_for_different_types( + pubkeys: &[Pubkey], + slot: u64, + spec_asset_class_list: &[SpecificationAssetClass], + ) -> Vec { + Self::generate_static_data_with_asset_list(pubkeys, slot, spec_asset_class_list) } fn generate_static_data( @@ -217,6 +266,24 @@ impl RocksTestEnvironmentSetup { .collect() } + fn generate_static_data_with_asset_list( + pubkeys: &[Pubkey], + slot: u64, + spec_asset_class_list: &[SpecificationAssetClass], + ) -> Vec { + pubkeys + .iter() + .zip(spec_asset_class_list.iter()) + .map(|(pubkey, spec_class)| AssetStaticDetails { + pubkey: *pubkey, + created_at: slot as i64, + specification_asset_class: *spec_class, + royalty_target_type: entities::enums::RoyaltyTargetType::Creators, + edition_address: Default::default(), + }) + .collect() + } + pub fn without_authority(_: &[Pubkey]) -> Vec { Vec::new() } @@ -249,6 +316,23 @@ impl RocksTestEnvironmentSetup { .collect() } + pub fn test_one_owner(pubkeys: &[Pubkey]) -> Vec { + let owner_uuid = Pubkey::new_unique(); + pubkeys + .iter() + .map(|pubkey| AssetOwner { + pubkey: *pubkey, + owner: generate_test_updated(Some(owner_uuid.clone())), + owner_type: generate_test_updated(entities::enums::OwnerType::Single), + owner_delegate_seq: generate_test_updated(Some( + rand::thread_rng().gen_range(0..100), + )), + delegate: generate_test_updated(Some(Pubkey::new_unique())), + is_current_owner: generate_test_updated(true), + }) + .collect() + } + pub fn dynamic_data(pubkeys: &[Pubkey], slot: u64) -> Vec { pubkeys .iter() From f33f9021db634aa550e9f2e7f5dad39184f0805e Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Mon, 27 Jan 2025 22:30:32 +0100 Subject: [PATCH 24/33] MTG-1028 fix lint --- tests/setup/src/rocks.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/setup/src/rocks.rs b/tests/setup/src/rocks.rs index cc1af8fb..d0383056 100644 --- a/tests/setup/src/rocks.rs +++ b/tests/setup/src/rocks.rs @@ -322,7 +322,7 @@ impl RocksTestEnvironmentSetup { 
.iter() .map(|pubkey| AssetOwner { pubkey: *pubkey, - owner: generate_test_updated(Some(owner_uuid.clone())), + owner: generate_test_updated(Some(owner_uuid)), owner_type: generate_test_updated(entities::enums::OwnerType::Single), owner_delegate_seq: generate_test_updated(Some( rand::thread_rng().gen_range(0..100), From 22f65ade71cba783f042107866fd3737375583b1 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Mon, 27 Jan 2025 22:54:08 +0100 Subject: [PATCH 25/33] MTG-1028 fix lint --- nft_ingester/tests/api_tests.rs | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index bd75b771..8270159f 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -30,10 +30,7 @@ mod tests { TokenAccount, UpdateVersion, Updated, }, }; - use interface::{ - account_balance::MockAccountBalanceGetter, - json::{MockJsonDownloader, MockJsonPersister}, - }; + use interface::{account_balance::MockAccountBalanceGetter, json::MockJsonDownloader}; use metrics_utils::{ApiMetricsConfig, IngesterMetricsConfig, SynchronizerMetricsConfig}; use mockall::predicate; use mpl_token_metadata::{accounts::MasterEdition, types::Key}; @@ -2590,15 +2587,6 @@ mod tests { let api = create_api(&env, None); let owner = generated_assets.owners[0].owner.value.unwrap(); - let synchronizer = nft_ingester::index_syncronizer::Synchronizer::new( - env.rocks_env.storage.clone(), - env.pg_env.client.clone(), - 200_000, - "".to_string(), - Arc::new(SynchronizerMetricsConfig::new()), - 1, - ); - let payload = SearchAssets { limit: Some(1000), page: Some(1), @@ -2624,8 +2612,7 @@ mod tests { let number_items = 100; let cli = Cli::default(); - let (env, generated_assets) = - setup::TestEnvironment::create_noise(&cli, number_items, 100).await; + let (env, _) = setup::TestEnvironment::create_noise(&cli, number_items, 100).await; let fungible_token_mint = Pubkey::new_unique(); let mint_authority = Pubkey::new_unique(); From 39c0457624de16f13c709de4822ae060d4951400 Mon Sep 17 00:00:00 2001 From: armyhaylenko Date: Tue, 28 Jan 2025 14:13:35 +0200 Subject: [PATCH 26/33] feat(api): add Raydium price fetcher cache warmup for symbols --- nft_ingester/src/api/service.rs | 22 +++++--- nft_ingester/src/raydium_price_fetcher.rs | 61 ++++++++++++++++++++--- nft_ingester/tests/price_fetch_test.rs | 9 ++++ 3 files changed, 78 insertions(+), 14 deletions(-) diff --git a/nft_ingester/src/api/service.rs b/nft_ingester/src/api/service.rs index e8ece178..e9b7f849 100644 --- a/nft_ingester/src/api/service.rs +++ b/nft_ingester/src/api/service.rs @@ -18,7 +18,7 @@ use tokio::{ sync::{broadcast::Receiver, Mutex}, task::{JoinError, JoinSet}, }; -use tracing::{error, info}; +use tracing::{error, info, warn}; use usecase::proofs::MaybeProofChecker; use uuid::Uuid; @@ -111,6 +111,20 @@ pub async fn start_api( } let addr = SocketAddr::from(([0, 0, 0, 0], port)); + let token_price_fetcher = Arc::new(RaydiumTokenPriceFetcher::new( + "https://api-v3.raydium.io".to_string(), + crate::raydium_price_fetcher::CACHE_TTL, + red_metrics, + )); + let tpf = token_price_fetcher.clone(); + tasks.lock().await.spawn(async move { + if let Err(e) = tpf.warmup().await { + warn!(error = %e, "Failed to warm up Raydium token price fetcher, cache is empty: {:?}", e); + } + let (symbol_cache_size, _) = tpf.get_cache_sizes(); + info!(%symbol_cache_size, "Warmed up Raydium token price fetcher with {} symbols", symbol_cache_size); + 
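+        // The warmup runs on a background task so API startup never blocks on
+        // Raydium; a failure only logs a warning and the symbol cache fills
+        // lazily on later lookups.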
Ok(()) + }); let api = DasApi::new( pg_client.clone(), rocks_db, @@ -123,11 +137,7 @@ pub async fn start_api( json_middleware_config.unwrap_or_default(), account_balance_getter, storage_service_base_url, - Arc::new(RaydiumTokenPriceFetcher::new( - "https://api-v3.raydium.io".to_string(), - crate::raydium_price_fetcher::CACHE_TTL, - red_metrics, - )), + token_price_fetcher, native_mint_pubkey, ); diff --git a/nft_ingester/src/raydium_price_fetcher.rs b/nft_ingester/src/raydium_price_fetcher.rs index 24508673..c4602917 100644 --- a/nft_ingester/src/raydium_price_fetcher.rs +++ b/nft_ingester/src/raydium_price_fetcher.rs @@ -36,6 +36,43 @@ impl RaydiumTokenPriceFetcher { } } + pub async fn warmup(&self) -> Result<(), IngesterError> { + #[derive(serde::Deserialize)] + #[serde(rename_all = "camelCase")] + struct MintListItem { + address: String, + symbol: String, + } + #[derive(serde::Deserialize)] + #[serde(rename_all = "camelCase")] + struct MintListResponse { + mint_list: Vec, + } + // returns well-known token infos + let req = "mint/list"; + let response = self.get(req).await.map_err(|e| UsecaseError::Reqwest(e.to_string()))?; + + let tokens_data = response + .get("data") + .and_then(|mint_list| { + serde_json::from_value::(mint_list.clone()).ok() + }) + .ok_or_else(|| { + UsecaseError::Reqwest(format!( + "No 'data' field in RaydiumTokenPriceFetcher ids response. Full response: {:#?}", + response + )) + })?; + + for MintListItem { address, symbol } in tokens_data.mint_list { + self.symbol_cache.insert(address.clone(), symbol.clone()).await; + } + + self.symbol_cache.run_pending_tasks().await; + + Ok(()) + } + async fn get(&self, endpoint: &str) -> Result { let start_time = chrono::Utc::now(); let response = reqwest::get(format!("{host}/{ep}", host = self.host, ep = endpoint)) @@ -53,6 +90,13 @@ impl RaydiumTokenPriceFetcher { } response } + + /// Returns the approximate sizes of the symbol and the price caches. + /// + /// The return format is (symbol_cache_size, price_cache_size). + pub fn get_cache_sizes(&self) -> (u64, u64) { + (self.symbol_cache.weighted_size(), self.price_cache.weighted_size()) + } } #[async_trait] @@ -61,6 +105,12 @@ impl TokenPriceFetcher for RaydiumTokenPriceFetcher { &self, token_ids: &[String], ) -> Result, UsecaseError> { + #[derive(serde::Deserialize)] + #[serde(rename_all = "camelCase")] + struct MintIdsItem { + address: String, + symbol: String, + } let token_ids_str: Vec = token_ids.iter().map(ToString::to_string).collect(); let mut result = HashMap::with_capacity(token_ids.len()); let mut missing_token_ids = Vec::new(); @@ -80,7 +130,7 @@ impl TokenPriceFetcher for RaydiumTokenPriceFetcher { let tokens_data = response .get("data") - .and_then(|td| td.as_array()) + .and_then(|item| serde_json::from_value::>>(item.clone()).ok()) .ok_or_else(|| { UsecaseError::Reqwest(format!( "No 'data' field in RaydiumTokenPriceFetcher ids response. 
Full response: {:#?}", @@ -88,13 +138,8 @@ impl TokenPriceFetcher for RaydiumTokenPriceFetcher { )) })?; - for data in tokens_data { - if let (Some(address), Some(symbol)) = ( - data.get("address").and_then(|a| a.as_str()), - data.get("symbol").and_then(|s| s.as_str()), - ) { - let address = address.to_string(); - let symbol = symbol.to_string(); + for maybe_token_data in tokens_data { + if let Some(MintIdsItem { address, symbol }) = maybe_token_data { self.symbol_cache.insert(address.clone(), symbol.clone()).await; result.insert(address, symbol); } diff --git a/nft_ingester/tests/price_fetch_test.rs b/nft_ingester/tests/price_fetch_test.rs index cbeb9ba1..9083fac8 100644 --- a/nft_ingester/tests/price_fetch_test.rs +++ b/nft_ingester/tests/price_fetch_test.rs @@ -47,4 +47,13 @@ mod tests { assert!(prices.get(&token_pie).unwrap().clone() > 0.0); assert!(prices.get(&non_existed_token).is_none()); } + + #[tokio::test(flavor = "multi_thread")] + async fn test_token_price_fetcher_warmup() { + let token_price_fetcher = RaydiumTokenPriceFetcher::default(); + token_price_fetcher.warmup().await.expect("warmup must succeed"); + + // check that the cache was pre-filled by some token symbols + assert!(token_price_fetcher.get_cache_sizes().0 > 0); + } } From d8beac46473360574d30a3b95367be456707e7b5 Mon Sep 17 00:00:00 2001 From: armyhaylenko Date: Tue, 28 Jan 2025 15:13:14 +0200 Subject: [PATCH 27/33] chore(api): move raydium api url to `consts.rs` --- nft_ingester/src/api/service.rs | 2 +- nft_ingester/src/consts.rs | 1 + nft_ingester/src/lib.rs | 1 + nft_ingester/src/raydium_price_fetcher.rs | 10 ++++------ 4 files changed, 7 insertions(+), 7 deletions(-) create mode 100644 nft_ingester/src/consts.rs diff --git a/nft_ingester/src/api/service.rs b/nft_ingester/src/api/service.rs index e9b7f849..efaa27bb 100644 --- a/nft_ingester/src/api/service.rs +++ b/nft_ingester/src/api/service.rs @@ -112,7 +112,7 @@ pub async fn start_api( let addr = SocketAddr::from(([0, 0, 0, 0], port)); let token_price_fetcher = Arc::new(RaydiumTokenPriceFetcher::new( - "https://api-v3.raydium.io".to_string(), + crate::consts::RAYDIUM_API_HOST.to_string(), crate::raydium_price_fetcher::CACHE_TTL, red_metrics, )); diff --git a/nft_ingester/src/consts.rs b/nft_ingester/src/consts.rs new file mode 100644 index 00000000..2337f4e7 --- /dev/null +++ b/nft_ingester/src/consts.rs @@ -0,0 +1 @@ +pub const RAYDIUM_API_HOST: &str = "https://api-v3.raydium.io"; diff --git a/nft_ingester/src/lib.rs b/nft_ingester/src/lib.rs index 923e1cb6..b60d95cf 100644 --- a/nft_ingester/src/lib.rs +++ b/nft_ingester/src/lib.rs @@ -5,6 +5,7 @@ pub mod batch_mint; pub mod buffer; pub mod cleaners; pub mod config; +pub mod consts; pub mod error; pub mod flatbuffer_mapper; pub mod gapfiller; diff --git a/nft_ingester/src/raydium_price_fetcher.rs b/nft_ingester/src/raydium_price_fetcher.rs index c4602917..fbc72d72 100644 --- a/nft_ingester/src/raydium_price_fetcher.rs +++ b/nft_ingester/src/raydium_price_fetcher.rs @@ -18,7 +18,7 @@ pub struct RaydiumTokenPriceFetcher { impl Default for RaydiumTokenPriceFetcher { fn default() -> Self { - Self::new("https://api-v3.raydium.io".to_string(), CACHE_TTL, None) + Self::new(crate::consts::RAYDIUM_API_HOST.to_string(), CACHE_TTL, None) } } @@ -138,11 +138,9 @@ impl TokenPriceFetcher for RaydiumTokenPriceFetcher { )) })?; - for maybe_token_data in tokens_data { - if let Some(MintIdsItem { address, symbol }) = maybe_token_data { - self.symbol_cache.insert(address.clone(), symbol.clone()).await; - 
result.insert(address, symbol); - } + for MintIdsItem { address, symbol } in tokens_data.into_iter().flatten() { + self.symbol_cache.insert(address.clone(), symbol.clone()).await; + result.insert(address, symbol); } } From 33409390c1066fcbb0ccc4907b5ae5d8ac493c9b Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:41:33 +0100 Subject: [PATCH 28/33] Fix pg docker run script for MacOs systems. --- integration_tests/run_postgres.sh | 4 ++-- integration_tests/src/common.rs | 2 +- integration_tests/src/synchronizer_tests.rs | 2 ++ 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/integration_tests/run_postgres.sh b/integration_tests/run_postgres.sh index 95515a52..308ffe75 100755 --- a/integration_tests/run_postgres.sh +++ b/integration_tests/run_postgres.sh @@ -5,8 +5,8 @@ IMAGE_NAME="postgres:14" DB_USER="solana" DB_PASSWORD="solana" DB_NAME="solana" -DB_PATH="./db-data" -ROCKS_DUMP_PATH="./rocks_dump" +DB_PATH="$(pwd)/db-data" +ROCKS_DUMP_PATH="$(pwd)/rocks_dump" HOST_PORT="5432" CONTAINER_PORT="5432" diff --git a/integration_tests/src/common.rs b/integration_tests/src/common.rs index 442736d5..2cc02e99 100644 --- a/integration_tests/src/common.rs +++ b/integration_tests/src/common.rs @@ -65,7 +65,7 @@ const API_MAX_PAGE_LIMIT: usize = 100; const DUMP_SYNCHRONIZER_BATCH_SIZE: usize = 1000; const SYNCHRONIZER_PARALLEL_TASKS: usize = 1; -const SYNCHRONIZER_DUMP_PATH: &str = "rocks_dump"; +const SYNCHRONIZER_DUMP_PATH: &str = "./rocks_dump"; const POSTGRE_MIGRATIONS_PATH: &str = "../migrations"; const POSTGRE_BASE_DUMP_PATH: &str = "/aura/integration_tests/"; diff --git a/integration_tests/src/synchronizer_tests.rs b/integration_tests/src/synchronizer_tests.rs index 0cc92ebc..10fb02fa 100644 --- a/integration_tests/src/synchronizer_tests.rs +++ b/integration_tests/src/synchronizer_tests.rs @@ -17,6 +17,8 @@ use tokio::{ use super::common::*; + + #[tokio::test] #[serial] #[named] From ebf247309a7f075f6abf13617d09ceb0dc7179f8 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Tue, 28 Jan 2025 15:49:05 +0100 Subject: [PATCH 29/33] Fix fmt --- integration_tests/src/synchronizer_tests.rs | 2 -- tests/setup/src/rocks.rs | 10 +--------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/integration_tests/src/synchronizer_tests.rs b/integration_tests/src/synchronizer_tests.rs index 10fb02fa..0cc92ebc 100644 --- a/integration_tests/src/synchronizer_tests.rs +++ b/integration_tests/src/synchronizer_tests.rs @@ -17,8 +17,6 @@ use tokio::{ use super::common::*; - - #[tokio::test] #[serial] #[named] diff --git a/tests/setup/src/rocks.rs b/tests/setup/src/rocks.rs index d0383056..7ae982e8 100644 --- a/tests/setup/src/rocks.rs +++ b/tests/setup/src/rocks.rs @@ -150,7 +150,7 @@ impl RocksTestEnvironment { let static_details = spec_asset_class_list.choose_multiple(&mut rng, cnt).cloned().collect::>(); - let static_details = RocksTestEnvironmentSetup::static_data_for_different_types( + let static_details = RocksTestEnvironmentSetup::generate_static_data_with_asset_list( &pubkeys, slot, &static_details, @@ -241,14 +241,6 @@ impl RocksTestEnvironmentSetup { Self::generate_static_data(pubkeys, slot, SpecificationAssetClass::Nft) } - pub fn static_data_for_different_types( - pubkeys: &[Pubkey], - slot: u64, - spec_asset_class_list: &[SpecificationAssetClass], - ) -> Vec { - Self::generate_static_data_with_asset_list(pubkeys, slot, spec_asset_class_list) - } - fn generate_static_data( pubkeys: 
&[Pubkey], slot: u64, From 39a0b252a4d2b2efa5497cece72de6090e7eddd1 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Tue, 28 Jan 2025 22:06:32 +0100 Subject: [PATCH 30/33] MTG-1028 fix integration tests --- nft_ingester/tests/api_tests.rs | 78 ++++++++++++++++++++++++++++++--- 1 file changed, 72 insertions(+), 6 deletions(-) diff --git a/nft_ingester/tests/api_tests.rs b/nft_ingester/tests/api_tests.rs index 8270159f..e81d3323 100644 --- a/nft_ingester/tests/api_tests.rs +++ b/nft_ingester/tests/api_tests.rs @@ -30,7 +30,10 @@ mod tests { TokenAccount, UpdateVersion, Updated, }, }; - use interface::{account_balance::MockAccountBalanceGetter, json::MockJsonDownloader}; + use interface::{ + account_balance::MockAccountBalanceGetter, + json::{MockJsonDownloader, MockJsonPersister}, + }; use metrics_utils::{ApiMetricsConfig, IngesterMetricsConfig, SynchronizerMetricsConfig}; use mockall::predicate; use mpl_token_metadata::{accounts::MasterEdition, types::Key}; @@ -2023,10 +2026,29 @@ mod tests { Ok(interface::json::JsonDownloadResult::JsonContent(offchain_data.to_string())) }); - let api = create_api( - &env, - Some(JsonMiddlewareConfig { is_enabled: true, max_urls_to_parse: 10 }), + let api = nft_ingester::api::api_impl::DasApi::< + MaybeProofChecker, + MockJsonDownloader, + MockJsonPersister, + MockAccountBalanceGetter, + RaydiumTokenPriceFetcher, + Storage, + >::new( + env.pg_env.client.clone(), + env.rocks_env.storage.clone(), + Arc::new(ApiMetricsConfig::new()), + None, + None, + 50, + Some(Arc::new(mock_middleware)), + None, + JsonMiddlewareConfig { is_enabled: true, max_urls_to_parse: 10 }, + Arc::new(MockAccountBalanceGetter::new()), + None, + Arc::new(RaydiumTokenPriceFetcher::default()), + NATIVE_MINT_PUBKEY.to_string(), ); + let pb = Pubkey::new_unique(); let authority = Pubkey::new_unique(); @@ -2149,7 +2171,29 @@ mod tests { let cli = Cli::default(); let (env, _) = setup::TestEnvironment::create(&cli, cnt, SLOT_UPDATED).await; - let api = create_api(&env, None); + let api = nft_ingester::api::api_impl::DasApi::< + MaybeProofChecker, + JsonWorker, + JsonWorker, + MockAccountBalanceGetter, + RaydiumTokenPriceFetcher, + Storage, + >::new( + env.pg_env.client.clone(), + env.rocks_env.storage.clone(), + Arc::new(ApiMetricsConfig::new()), + None, + Some(env.rocks_env.storage.clone()), + 50, + None, + None, + JsonMiddlewareConfig::default(), + Arc::new(MockAccountBalanceGetter::new()), + None, + Arc::new(RaydiumTokenPriceFetcher::default()), + NATIVE_MINT_PUBKEY.to_string(), + ); + let asset_id = Pubkey::new_unique(); let tree_id = Pubkey::new_unique(); env.rocks_env @@ -2290,7 +2334,29 @@ mod tests { .expect_get_account_balance_lamports() .returning(move |_| Ok(10_u64.pow(9))); - let api = create_api(&env, None); + let api = nft_ingester::api::api_impl::DasApi::< + MaybeProofChecker, + JsonWorker, + JsonWorker, + MockAccountBalanceGetter, + RaydiumTokenPriceFetcher, + Storage, + >::new( + env.pg_env.client.clone(), + env.rocks_env.storage.clone(), + Arc::new(ApiMetricsConfig::new()), + None, + None, + 50, + None, + None, + JsonMiddlewareConfig::default(), + Arc::new(mock_account_balance_getter), + None, + Arc::new(RaydiumTokenPriceFetcher::default()), + NATIVE_MINT_PUBKEY.to_string(), + ); + let tasks = JoinSet::new(); let mutexed_tasks = Arc::new(Mutex::new(tasks)); let payload = SearchAssets { From 9d737cfa93680fb4713c07568e1466d8b3a8b024 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Wed, 29 Jan 
2025 00:43:25 +0100 Subject: [PATCH 31/33] MTG-1028 Add integration tests for freeze authorities & delegate authorities - add integration test with onchain data --- ...38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq | Bin 0 -> 824 bytes ...jFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v | Bin 0 -> 224 bytes ...ETo8T8wMcN2wCjav8VK6eh3dLk63evNDPxzLSJra8B | Bin 0 -> 312 bytes ...jFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v | 1 + integration_tests/src/lib.rs | 1 + integration_tests/src/regular_nft_tests.rs | 11 ++- ..._fungible_token_mint_freeze_authority.snap | 73 ++++++++++++++++++ integration_tests/src/token_tests.rs | 56 ++++++++++++++ 8 files changed, 140 insertions(+), 2 deletions(-) create mode 100644 integration_tests/src/data/accounts/fungible_token_mint_freeze_authority/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq create mode 100644 integration_tests/src/data/accounts/fungible_token_mint_freeze_authority/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v create mode 100644 integration_tests/src/data/accounts/fungible_token_mint_freeze_authority/FGETo8T8wMcN2wCjav8VK6eh3dLk63evNDPxzLSJra8B create mode 100644 integration_tests/src/data/largest_token_account_ids/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v create mode 100644 integration_tests/src/snapshots/integration_tests__token_tests__fungible_token_mint_freeze_authority.snap create mode 100644 integration_tests/src/token_tests.rs diff --git a/integration_tests/src/data/accounts/fungible_token_mint_freeze_authority/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq b/integration_tests/src/data/accounts/fungible_token_mint_freeze_authority/5x38Kp4hvdomTCnCrAny4UtMUt5rQBdB6px2K1Ui45Wq new file mode 100644 index 0000000000000000000000000000000000000000..767786dfb1666d0cb9562a8269f9e694a4c530b3 GIT binary patch literal 824 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQ^z4vWJ0r8`dyU|(_V&if>DTl+RleQM z&c0lG+(-Xrq7`>R>c+*a*YcIp~Q=pvXObiSxGLIwQMltHK{1VZgQz|7B*M9!O0x{`1t^Y4eSDgKG zHjEUJ*KQ@u=?ze~6Qx{IYdHSK-x!09zWVW=vjgK~;Xw~zzweCN|N|2dSzFj$C zK>9xvFap^iaK^)M{S+W42*?JB&G^^$qIdSASKSBCENSo7>^S7`Rl7xH{}#TBi|(F| zlbn5q$NBMm14RZlMv&^Hf>m9SN2joE32Hl$Ui9p&P~PmWSSyR*T=VZ+L|$x5XJ7yT D0%BT= literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/fungible_token_mint_freeze_authority/FGETo8T8wMcN2wCjav8VK6eh3dLk63evNDPxzLSJra8B b/integration_tests/src/data/accounts/fungible_token_mint_freeze_authority/FGETo8T8wMcN2wCjav8VK6eh3dLk63evNDPxzLSJra8B new file mode 100644 index 0000000000000000000000000000000000000000..37c701ac0d8e7bd997e4aa4e85b23862cab9e73c GIT binary patch literal 312 zcmY#jfB*@G90nE!4+a$=H-NzfNJap00)qsQy!@)?>zXwy&ax_$3W>)!pZn?+dvo4B zg@xIns^%7-r{7__`|aWN)P<98o_^?d?serFnJukvxdFc_ G?6Lq1$66Br literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/largest_token_account_ids/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v b/integration_tests/src/data/largest_token_account_ids/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v new file mode 100644 index 00000000..c79216f3 --- /dev/null +++ b/integration_tests/src/data/largest_token_account_ids/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v/EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v @@ -0,0 +1 @@ +ÓêŒõ¬¬¨Í u\CÎõJ]ÙžÞ ¡kU%78ó—Ü \ No newline at end of file diff --git a/integration_tests/src/lib.rs b/integration_tests/src/lib.rs index e000632d..dd339e42 100644 --- a/integration_tests/src/lib.rs +++ b/integration_tests/src/lib.rs @@ -7,3 +7,4 @@ mod general_scenario_tests; mod 
mpl_core_tests; mod regular_nft_tests; mod synchronizer_tests; +mod token_tests; diff --git a/integration_tests/src/regular_nft_tests.rs b/integration_tests/src/regular_nft_tests.rs index 6bcd6623..fbfbe9af 100644 --- a/integration_tests/src/regular_nft_tests.rs +++ b/integration_tests/src/regular_nft_tests.rs @@ -1,11 +1,18 @@ use std::sync::Arc; -use entities::api_req_params::{GetAsset, GetAssetBatch, GetAssetsByGroup, SearchAssets}; +use entities::{ + api_req_params::{GetAsset, GetAssetBatch, GetAssetsByGroup, SearchAssets}, + enums::AssetType, +}; use function_name::named; use itertools::Itertools; +use nft_ingester::api::dapi::response::AssetList; use rocks_db::storage_traits::AssetIndexReader; use serial_test::serial; -use tokio::{sync::Mutex, task::JoinSet}; +use tokio::{ + sync::{broadcast, Mutex}, + task::JoinSet, +}; use super::common::*; diff --git a/integration_tests/src/snapshots/integration_tests__token_tests__fungible_token_mint_freeze_authority.snap b/integration_tests/src/snapshots/integration_tests__token_tests__fungible_token_mint_freeze_authority.snap new file mode 100644 index 00000000..86e01676 --- /dev/null +++ b/integration_tests/src/snapshots/integration_tests__token_tests__fungible_token_mint_freeze_authority.snap @@ -0,0 +1,73 @@ +--- +source: integration_tests/src/token_tests.rs +assertion_line: 38 +expression: response +snapshot_kind: text +--- +{ + "interface": "Custom", + "id": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "content": { + "$schema": "https://schema.metaplex.com/nft1.0.json", + "json_uri": "", + "files": [], + "metadata": { + "name": "USD Coin", + "symbol": "USDC" + }, + "links": {} + }, + "authorities": [ + { + "address": "2wmVCSfPxGPjrnMMn7rchp4uaeoTqN39mXFC2zhPdri9", + "scopes": [ + "full" + ] + } + ], + "compression": { + "eligible": false, + "compressed": false, + "data_hash": "", + "creator_hash": "", + "asset_hash": "", + "tree": "", + "seq": 0, + "leaf_id": 0 + }, + "grouping": [], + "royalty": { + "royalty_model": "creators", + "target": null, + "percent": 0.0, + "basis_points": 0, + "primary_sale_happened": false, + "locked": false + }, + "creators": [], + "ownership": { + "frozen": false, + "delegated": false, + "delegate": null, + "ownership_model": "token", + "owner": "" + }, + "supply": { + "print_max_supply": 0, + "print_current_supply": 0, + "edition_nonce": 252 + }, + "mutable": true, + "burnt": false, + "lamports": 5616720, + "executable": false, + "metadata_owner": "metaqbxxUerdq28cj1RbAWkYQm3ybzjb6a8bt518x1s", + "rent_epoch": 18446744073709551615, + "token_info": { + "supply": 9342137502207180, + "decimals": 6, + "token_program": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "mint_authority": "BJE5MMbqXjVwjAF7oxwPYXnTXDyspzZyt4vwenNw5ruG", + "freeze_authority": "7dGbd2QZcCKcTndnHcTL8q7SMVXAkp688NTQYwrRCrar" + } +} diff --git a/integration_tests/src/token_tests.rs b/integration_tests/src/token_tests.rs new file mode 100644 index 00000000..ca08989b --- /dev/null +++ b/integration_tests/src/token_tests.rs @@ -0,0 +1,56 @@ +use std::sync::Arc; + +use entities::{api_req_params::GetAsset, enums::AssetType}; +use function_name::named; +use itertools::Itertools; +use nft_ingester::api::dapi::response::{AssetList, TokenAccountsList}; +use serial_test::serial; +use tokio::{ + sync::{broadcast, Mutex}, + task::JoinSet, +}; + +use crate::common::{ + index_seed_events, seed_token_mints, trim_test_name, Network, SeedEvent, TestSetup, + TestSetupOptions, +}; + +#[tokio::test] +#[serial] +#[named] +async fn 
test_fungible_token_mint_freeze_authority() { + let name = trim_test_name(function_name!()); + let setup = TestSetup::new_with_options( + name.clone(), + TestSetupOptions { network: Some(Network::Mainnet), clear_db: true }, + ) + .await; + + // USDC token + let seeds: Vec = seed_token_mints(&["EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v"]); + + index_seed_events(&setup, seeds.iter().collect_vec()).await; + + let request = r#" + { + "id": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" + } + "#; + + let mutexed_tasks = Arc::new(Mutex::new(JoinSet::new())); + + let request: GetAsset = serde_json::from_str(request).unwrap(); + let response_value = setup.das_api.get_asset(request, mutexed_tasks.clone()).await.unwrap(); + let res: AssetList = serde_json::from_value(response_value.clone()).unwrap(); + + insta::assert_json_snapshot!(name, response_value.clone()); + + // assert_eq!( + // res.items[0].clone().token_info.unwrap().mint_authority.unwrap(), + // "BJE5MMbqXjVwjAF7oxwPYXnTXDyspzZyt4vwenNw5ruG".to_string() + // ); + // assert_eq!( + // res.items[0].clone().token_info.unwrap().freeze_authority.unwrap(), + // "7dGbd2QZcCKcTndnHcTL8q7SMVXAkp688NTQYwrRCrar".to_string() + // ); +} From b91a9d58cb42ebd33e72eabf22b038826ee7d2a7 Mon Sep 17 00:00:00 2001 From: andrii_kl <18900364+andrii-kl@users.noreply.github.com> Date: Wed, 29 Jan 2025 12:09:25 +0100 Subject: [PATCH 32/33] MTG-1277 Fixed sh script launch for running pg docker (test env) --- integration_tests/run_postgres.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/integration_tests/run_postgres.sh b/integration_tests/run_postgres.sh index 308ffe75..463453f2 100755 --- a/integration_tests/run_postgres.sh +++ b/integration_tests/run_postgres.sh @@ -5,8 +5,9 @@ IMAGE_NAME="postgres:14" DB_USER="solana" DB_PASSWORD="solana" DB_NAME="solana" -DB_PATH="$(pwd)/db-data" -ROCKS_DUMP_PATH="$(pwd)/rocks_dump" +SCRIPT_DIR="$(dirname "$(realpath "$0")")" +DB_PATH="$SCRIPT_DIR/db-data" +ROCKS_DUMP_PATH="$SCRIPT_DIR/rocks_dump" HOST_PORT="5432" CONTAINER_PORT="5432" From 325257bcbccf96fe706dbae4a79f1d21f4716252 Mon Sep 17 00:00:00 2001 From: Kyrylo Stepanov Date: Wed, 29 Jan 2025 14:05:53 +0200 Subject: [PATCH 33/33] Added a possibility to do integration tests using Eclipse && Added test for the case when fungible tokens are returned while only NFTs are requested (#388) --- .github/workflows/rust.yml | 2 + integration_tests/src/account_update_tests.rs | 4 +- integration_tests/src/common.rs | 10 +- ...QDwULQDdpisGssKZeRw2qcCTiZnsAmi6cnR89YYxSg | Bin 0 -> 304 bytes ...vjE7bDpwA2nFp5KbjWHjG2RHBWi5z1pP5ehY9t6p8V | Bin 0 -> 304 bytes ...RQs1xZdASeL65PHTa1C8GnYCWtX18Lx98ofJB3SZNC | Bin 0 -> 304 bytes ...k1Zv557DAnichMsWE4cfURYbr1D2yWfcaqehydHo9R | Bin 0 -> 304 bytes ...peBtH5MwfA5t9uhr51AYL7MR5DbPJ5xQ7wizzvowUH | Bin 0 -> 872 bytes ...kXycbrAhVzeB9ngnjcCdjk5bxTJYzscSZMhRRBx3QB | Bin 0 -> 680 bytes ...KGo1z9k3PjTsQw5GDQmvAbKwuRGtb4APkCneH8AVY1 | Bin 0 -> 616 bytes ...A21TR9QTsQeR5sP6L2PytjgxXcVRSyqUY5vRcUogom | Bin 0 -> 600 bytes ...pMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ | Bin 0 -> 616 bytes ...nRA9ALhDYC5SWhBrw19JVWnDxnrGMYTmkfLsLkbpzV | Bin 0 -> 304 bytes ...peBtH5MwfA5t9uhr51AYL7MR5DbPJ5xQ7wizzvowUH | 1 + ...kXycbrAhVzeB9ngnjcCdjk5bxTJYzscSZMhRRBx3QB | 1 + ...KGo1z9k3PjTsQw5GDQmvAbKwuRGtb4APkCneH8AVY1 | 1 + ...A21TR9QTsQeR5sP6L2PytjgxXcVRSyqUY5vRcUogom | 1 + ...pMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ | 1 + integration_tests/src/regular_nft_tests.rs | 48 ++ ...ested_non_fungibles_are_non_fungibles.snap | 458 ++++++++++++++++++ 
nft_ingester/tests/api_tests.rs | 174 +++++++ 21 files changed, 694 insertions(+), 7 deletions(-) create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/2TQDwULQDdpisGssKZeRw2qcCTiZnsAmi6cnR89YYxSg create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/44vjE7bDpwA2nFp5KbjWHjG2RHBWi5z1pP5ehY9t6p8V create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/4pRQs1xZdASeL65PHTa1C8GnYCWtX18Lx98ofJB3SZNC create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/5ok1Zv557DAnichMsWE4cfURYbr1D2yWfcaqehydHo9R create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/75peBtH5MwfA5t9uhr51AYL7MR5DbPJ5xQ7wizzvowUH create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/7ZkXycbrAhVzeB9ngnjcCdjk5bxTJYzscSZMhRRBx3QB create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/8WKGo1z9k3PjTsQw5GDQmvAbKwuRGtb4APkCneH8AVY1 create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/9qA21TR9QTsQeR5sP6L2PytjgxXcVRSyqUY5vRcUogom create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ create mode 100644 integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/JCnRA9ALhDYC5SWhBrw19JVWnDxnrGMYTmkfLsLkbpzV create mode 100644 integration_tests/src/data/largest_token_account_ids/75peBtH5MwfA5t9uhr51AYL7MR5DbPJ5xQ7wizzvowUH/75peBtH5MwfA5t9uhr51AYL7MR5DbPJ5xQ7wizzvowUH create mode 100644 integration_tests/src/data/largest_token_account_ids/7ZkXycbrAhVzeB9ngnjcCdjk5bxTJYzscSZMhRRBx3QB/7ZkXycbrAhVzeB9ngnjcCdjk5bxTJYzscSZMhRRBx3QB create mode 100644 integration_tests/src/data/largest_token_account_ids/8WKGo1z9k3PjTsQw5GDQmvAbKwuRGtb4APkCneH8AVY1/8WKGo1z9k3PjTsQw5GDQmvAbKwuRGtb4APkCneH8AVY1 create mode 100644 integration_tests/src/data/largest_token_account_ids/9qA21TR9QTsQeR5sP6L2PytjgxXcVRSyqUY5vRcUogom/9qA21TR9QTsQeR5sP6L2PytjgxXcVRSyqUY5vRcUogom create mode 100644 integration_tests/src/data/largest_token_account_ids/DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ/DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ create mode 100644 integration_tests/src/snapshots/integration_tests__regular_nft_tests__requested_non_fungibles_are_non_fungibles.snap diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 5e6bebb7..2e436593 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -136,4 +136,6 @@ jobs: DATABASE_TEST_URL: "postgres://postgres:postgres@127.0.0.1:5432/postgres" DEVNET_RPC_URL: ${{ secrets.SOLANA_DEVNET_RPC_URL }} MAINNET_RPC_URL: ${{ secrets.SOLANA_MAINNET_RPC_URL }} + ECLIPSE_DEVNET_RPC_URL: ${{ secrets.SOLANA_ECLIPSE_DEVNET_RPC_URL }} + ECLIPSE_MAINNET_RPC_URL: ${{ secrets.SOLANA_ECLIPSE_MAINNET_RPC_URL }} run: cargo test --features integration_tests -- --nocapture \ No newline at end of file diff --git a/integration_tests/src/account_update_tests.rs b/integration_tests/src/account_update_tests.rs index 904efb6b..1d03b1a3 100644 --- a/integration_tests/src/account_update_tests.rs +++ b/integration_tests/src/account_update_tests.rs @@ -172,7 +172,7 @@ async fn test_account_updates() { setup.clean_up_data_bases().await; - index_nft(&setup, mint).await; + index_nft_accounts(&setup, get_nft_accounts(&setup, mint).await).await; let response = 
setup.das_api.get_asset(request.clone(), mutexed_tasks.clone()).await.unwrap(); @@ -217,7 +217,7 @@ async fn test_account_updates() { for named_update in named_updates.clone() { setup.clean_up_data_bases().await; - index_nft(&setup, mint).await; + index_nft_accounts(&setup, get_nft_accounts(&setup, mint).await).await; let other_named_updates = named_updates .clone() diff --git a/integration_tests/src/common.rs b/integration_tests/src/common.rs index 2cc02e99..a0f4ac32 100644 --- a/integration_tests/src/common.rs +++ b/integration_tests/src/common.rs @@ -117,6 +117,8 @@ impl TestSetup { let rpc_url = match opts.network.unwrap_or_default() { Network::Mainnet => std::env::var("MAINNET_RPC_URL").unwrap(), Network::Devnet => std::env::var("DEVNET_RPC_URL").unwrap(), + Network::EclipseMainnet => std::env::var("ECLIPSE_MAINNET_RPC_URL").unwrap(), + Network::EclipseDevnet => std::env::var("ECLIPSE_DEVNET_RPC_URL").unwrap(), }; let client = Arc::new(RpcClient::new(rpc_url.to_string())); @@ -537,6 +539,8 @@ pub enum Network { #[default] Mainnet, Devnet, + EclipseMainnet, + EclipseDevnet, } #[derive(Clone, Copy, Debug)] @@ -553,7 +557,7 @@ pub async fn index_seed_events(setup: &TestSetup, events: Vec<&SeedEvent>) { index_and_sync_account_with_ordered_slot(setup, *account).await; }, SeedEvent::Nft(mint) => { - index_nft(setup, *mint).await; + index_nft_accounts(setup, get_nft_accounts(setup, *mint).await).await; }, SeedEvent::Signature(sig) => { index_transaction(setup, *sig).await; @@ -688,10 +692,6 @@ async fn index_token_mint(setup: &TestSetup, mint: Pubkey) { } } -pub async fn index_nft(setup: &TestSetup, mint: Pubkey) { - index_nft_accounts(setup, get_nft_accounts(setup, mint).await).await; -} - pub async fn index_nft_accounts(setup: &TestSetup, nft_accounts: NftAccounts) { for account in [nft_accounts.mint, nft_accounts.metadata, nft_accounts.token] { index_account(setup, account).await; diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/2TQDwULQDdpisGssKZeRw2qcCTiZnsAmi6cnR89YYxSg b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/2TQDwULQDdpisGssKZeRw2qcCTiZnsAmi6cnR89YYxSg new file mode 100644 index 0000000000000000000000000000000000000000..b9f542f02d8e60a86b2f36e8f1027329a548fc8a GIT binary patch literal 304 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1dtS+&;1~!E9vDocmL8A&i9sDztp<4Zff?W zjj7_FeU2XyWxMl v$2kQKyDg*tbFXTbS$g4}#zSB8N!|ibc!0by; literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/44vjE7bDpwA2nFp5KbjWHjG2RHBWi5z1pP5ehY9t6p8V b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/44vjE7bDpwA2nFp5KbjWHjG2RHBWi5z1pP5ehY9t6p8V new file mode 100644 index 0000000000000000000000000000000000000000..6f27a1b39270443dbd92817374b95b28d04909de GIT binary patch literal 304 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1d!C7(z|a3-|3Wm-fPD;c}mvbYu@-*T-;%< z$(~hJs`D$@*zSIN_^!17o`h5Eo+mkHZ*7zM-G3T*6$**&^wy3RcbkvTE_+rC+W-xwGt4oa?>l v$2kQKyDg*tbFXTbS$g4}#zSB8N!|ibcxYJ2$ literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/4pRQs1xZdASeL65PHTa1C8GnYCWtX18Lx98ofJB3SZNC b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/4pRQs1xZdASeL65PHTa1C8GnYCWtX18Lx98ofJB3SZNC new file mode 100644 index 0000000000000000000000000000000000000000..2f9bffb2ceaa87d3e339374bdec90b2be2c51176 GIT binary patch literal 304 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1dz1Y@%7JwJE~=d{Q{9*Qk+^V^fTU`o&9Eo 
z;;}FHZTR0^WxMl&%LT$X6c1@8V`NVCv^)*|DDC!4>J>LG+v4nwI~KLv4gn)wZBV$ literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/5ok1Zv557DAnichMsWE4cfURYbr1D2yWfcaqehydHo9R b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/5ok1Zv557DAnichMsWE4cfURYbr1D2yWfcaqehydHo9R new file mode 100644 index 0000000000000000000000000000000000000000..1d18365ce2d11796748b18cd9227b7e174e75889 GIT binary patch literal 304 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1dw#kkXY=zrq`AEWM z2%#9JKIUIB1;}~A1QE!|<9FHfNV?c&eWy_DA^(WEn2v3(KcJr+ZLKkFntV+ zAQmCjP)n#uF$ys_5VV(2NQn_)#;M6Q%$xr#dG_I4N8Ee!d)7+PMJr_UZcXYrDR4?+ zeNrP2g91T!2s89C5(*FXC8 z>!oGpq$VX6r|Ko==juZhrsU@)X66;cxVo7oskwSt#rb(0K)oVBT$Ep#m{U?257d}h z9AA*1nO9QG3=}stFwg-~ZceEM0jZ(Mxo)KeS>EPF;rV$^rs>`#Ss{jIj!CW+c^ScB z&W1@&0gi=fAZ>;Q2K+!OwW1)iD6u3nKQBHdu_ToXBx7!7YG7yqq}YHwh`X?PjT5ZB zBsH%XsGA+cfp{6DD5S0so3AK#mdz7htoa`53QeI|SS(#~`m{k_w6BQDcnP?VR6<(EY=;)a3U6NQ@ zSXc~-Nf96}%CAh!DXEN4N-WMSjxWg1%quBo0g9U%7#QdPX~*JV??P{L=j;l%&@^N3 o#Qeajsv?UhpMYG`G`IB9(2_7$)4+0fN1wpJJdm3V4Gb6<0H=ZBwg3PC literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/8WKGo1z9k3PjTsQw5GDQmvAbKwuRGtb4APkCneH8AVY1 b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/8WKGo1z9k3PjTsQw5GDQmvAbKwuRGtb4APkCneH8AVY1 new file mode 100644 index 0000000000000000000000000000000000000000..868f86c5b4a1da07e8121d1c1052dc0e9a52cb06 GIT binary patch literal 616 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1dz;c31Bc*s}D<*J~VaLE2ALG-TVKiq#lsr zp7cfH>YRc;w!7aRzANp&C*c&k=Sj}lTic}m_%`q4RsXm5r^bzi-G5|&W_;KQB!GYs zLNS!y;$JZZ$aw-4klei4R_On?RG%~9>kq9EU^gz`Hs!;#Nz2|WW1Nv^>Sh7c$G`|; z5mF7cgqjqi5Q76jdkKY<7!}MAX4pirJ6M4sQ-vvh_0y(u(zy64SDaQZv&Fjf=`E&5X@*%Cd}%OEObTGxIA_O!CW0k~7QF dk}@)~ic3s0vdz;nsxnhkD$`TTjT1r9003SX%MSnm literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/9qA21TR9QTsQeR5sP6L2PytjgxXcVRSyqUY5vRcUogom b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/9qA21TR9QTsQeR5sP6L2PytjgxXcVRSyqUY5vRcUogom new file mode 100644 index 0000000000000000000000000000000000000000..12b0481d4e5b760521649ec6c9969811da47f3df GIT binary patch literal 600 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1dwbtp7<;2j{S^YF_|aM8A~liw^x1J_qDWk z%A(c>S?AV-u-*Ok@Lg&DJqf4SJx_AZ-r6Sh$G3SWulm2eKQ(SF?EWJIG=t+ikN^Tk z2*q&!8vlwZK+YAYK-jkA`{`d3+2o5R9cN{GE|O{~nJzs)>FkPDcb?_5PbI$Yhaj?tZQOnX{?)Qlwz)HW|EX@ Pl45FZW@2IqG=c#DkSwwe literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/DvpMQyF8sT6hPBewQf6VrVESw6L1zewPyNit1CSt1tDJ new file mode 100644 index 0000000000000000000000000000000000000000..cf8a5a53fc22c2ff1ccee4adda5fc1e6d41c222c GIT binary patch literal 616 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1du!+I#I(r<(67$oP0V1-Zf*5kQfQl9GaAD}DXEw36bI z{G!D4RK1+cylnlWjBN9?jH=Akl*;tfa^pl$Gynja_SFmk literal 0 HcmV?d00001 diff --git a/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/JCnRA9ALhDYC5SWhBrw19JVWnDxnrGMYTmkfLsLkbpzV b/integration_tests/src/data/accounts/requested_non_fungibles_are_non_fungibles/JCnRA9ALhDYC5SWhBrw19JVWnDxnrGMYTmkfLsLkbpzV new file mode 100644 index 0000000000000000000000000000000000000000..84d96ed58e92bbf7e82a2780c5d00e375a1792ad GIT binary patch literal 304 zcmY#jfB*@G1O^rc4+a%5*9Axh0C5C^1d#kcJ3C}{aF|@@=H-T~LtaUQh22>5PVx9f 
zbu->~e(nZMY%Hj5 uIRy^8Eu;T)uWFZBdf}bMLtpbr-2&2oXR-Ff%!C?^mm)::new( + env.pg_env.client.clone(), + env.rocks_env.storage.clone(), + Arc::new(ApiMetricsConfig::new()), + None, + None, + 50, + None, + None, + JsonMiddlewareConfig::default(), + Arc::new(MockAccountBalanceGetter::new()), + None, + Arc::new(RaydiumTokenPriceFetcher::default()), + NATIVE_MINT_PUBKEY.to_string(), + ); + let tasks = JoinSet::new(); + let mutexed_tasks = Arc::new(Mutex::new(tasks)); + + let token_updates_processor = + TokenAccountsProcessor::new(Arc::new(IngesterMetricsConfig::new())); + let mplx_updates_processor = + MplxAccountsProcessor::new(Arc::new(IngesterMetricsConfig::new())); + + let token_key = Pubkey::new_unique(); + let mint_key = Pubkey::new_unique(); + let owner_key = Pubkey::new_unique(); + + let mint_auth_key = Pubkey::new_unique(); + + let token_acc = TokenAccount { + pubkey: token_key, + mint: mint_key, + delegate: None, + owner: owner_key, + extensions: None, + frozen: false, + delegated_amount: 0, + slot_updated: 1, + amount: 1, + write_version: 1, + }; + + let mint_acc = Mint { + pubkey: mint_key, + slot_updated: 1, + supply: 10, + decimals: 0, + mint_authority: Some(mint_auth_key), + freeze_authority: None, + token_program: Default::default(), + extensions: None, + write_version: 1, + }; + + let metadata = MetadataInfo { + metadata: Metadata { + key: Key::MetadataV1, + update_authority: Pubkey::new_unique(), + mint: mint_key, + name: "".to_string(), + symbol: "".to_string(), + uri: "".to_string(), + seller_fee_basis_points: 0, + creators: None, + primary_sale_happened: false, + is_mutable: true, + edition_nonce: None, + token_standard: Some(mpl_token_metadata::types::TokenStandard::Fungible), + collection: None, + uses: None, + collection_details: None, + programmable_config: None, + }, + slot_updated: 1, + write_version: 1, + lamports: 1, + executable: false, + metadata_owner: None, + rent_epoch: 0, + }; + let offchain_data = OffChainData { + url: Some("https://ping-pong".to_string()), + metadata: Some("{\"msg\": \"hallo\"}".to_string()), + ..Default::default() + }; + + env.rocks_env + .storage + .asset_offchain_data + .put(offchain_data.url.clone().unwrap(), offchain_data.clone()) + .unwrap(); + + let mut batch_storage = BatchSaveStorage::new( + env.rocks_env.storage.clone(), + 10, + Arc::new(IngesterMetricsConfig::new()), + ); + token_updates_processor + .transform_and_save_mint_account(&mut batch_storage, &mint_acc) + .unwrap(); + token_updates_processor + .transform_and_save_token_account(&mut batch_storage, token_acc.pubkey, &token_acc) + .unwrap(); + + mplx_updates_processor + .transform_and_store_metadata_account(&mut batch_storage, mint_key, &metadata) + .unwrap(); + batch_storage.flush().unwrap(); + + let payload = GetAsset { + id: mint_key.to_string(), + options: Options { show_unverified_collections: true, ..Default::default() }, + }; + let response = api.get_asset(payload.clone(), mutexed_tasks.clone()).await.unwrap(); + + assert_eq!(response["interface"], "FungibleToken".to_string()); + + // Given record that respects fungible metadata + // When updated comes with NFT metadata + // Then it should transfer to NFT + + let metadata = MetadataInfo { + metadata: Metadata { + token_standard: Some(mpl_token_metadata::types::TokenStandard::NonFungible), + ..metadata.metadata + }, + ..metadata + }; + + mplx_updates_processor + .transform_and_store_metadata_account(&mut batch_storage, mint_key, &metadata) + .unwrap(); + batch_storage.flush().unwrap(); + let response = 
api.get_asset(payload.clone(), mutexed_tasks.clone()).await.unwrap(); + + assert_eq!(response["interface"], "V1_NFT".to_string()); + + // Given record that respects NFT metadata + // When updated comes with fungible metadata + // Then it should stay as NFT + let metadata = MetadataInfo { + metadata: Metadata { + token_standard: Some(mpl_token_metadata::types::TokenStandard::Fungible), + ..metadata.metadata + }, + ..metadata + }; + + mplx_updates_processor + .transform_and_store_metadata_account(&mut batch_storage, mint_key, &metadata) + .unwrap(); + batch_storage.flush().unwrap(); + let response = api.get_asset(payload.clone(), mutexed_tasks.clone()).await.unwrap(); + + assert_eq!(response["interface"], "V1_NFT".to_string()); + } }
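The final hunk above pins down a one-way classification rule: a mint first indexed as fungible is upgraded to an NFT interface by a later `NonFungible` metadata update, while a subsequent `Fungible` update leaves the NFT classification in place. A minimal, self-contained sketch of that rule follows; it uses a local stand-in enum rather than the crate's `SpecificationAssetClass`, and `upgrade_class` is a hypothetical helper for illustration, not a function from this codebase (the real decision lives in the ingester's account processors).

    // Hypothetical sketch, not repository code: illustrates the upgrade rule
    // asserted by the test's Given/When/Then comments.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    enum AssetClass {
        FungibleToken,
        FungibleAsset,
        Nft,
        ProgrammableNft,
    }

    // A later metadata update may promote a fungible mint to an NFT class,
    // but an asset already classified as an NFT is never demoted back.
    fn upgrade_class(current: AssetClass, incoming: AssetClass) -> AssetClass {
        use AssetClass::*;
        match (current, incoming) {
            (FungibleToken | FungibleAsset, Nft | ProgrammableNft) => incoming,
            (Nft | ProgrammableNft, FungibleToken | FungibleAsset) => current,
            _ => incoming,
        }
    }

    fn main() {
        use AssetClass::*;
        assert_eq!(upgrade_class(FungibleToken, Nft), Nft); // fungible -> NFT upgrades
        assert_eq!(upgrade_class(Nft, FungibleToken), Nft); // NFT -> fungible is ignored
    }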