Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[MET-46] feat: dynamic data slot for each field #6

Merged
merged 19 commits
Dec 21, 2023
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2,659 changes: 655 additions & 2,004 deletions Cargo.lock

Large diffs are not rendered by default.

3 changes: 0 additions & 3 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,6 @@ members = [
"postgre-client",
"entities",
]
exclude = [
"migration",
]

[profile.release]
lto = true
Expand Down
20 changes: 4 additions & 16 deletions Makefile
Original file line number Diff line number Diff line change
@@ -1,27 +1,15 @@
.PHONY: build start build-json-downloader start-json-downloader build-backfiller start-backfiller dev stop
.PHONY: build start dev stop

SHELL := /bin/bash

build:
@docker compose -f docker-compose.yaml build ingester-first-consumer ingester-second-consumer
@docker compose -f docker-compose.yaml build ingester-first-consumer

start:
@docker compose -f docker-compose.yaml up -d ingester-first-consumer ingester-second-consumer

build-json-downloader:
@docker compose -f docker-compose.yaml build

start-json-downloader:
@docker compose -f docker-compose.yaml up -d

build-backfiller:
@docker compose -f docker-compose.yaml build backfiller-consumer backfiller

start-backfiller:
@docker compose -f docker-compose.yaml up -d backfiller-consumer backfiller
@docker compose -f docker-compose.yaml up -d ingester-first-consumer

dev:
@docker compose -f docker-compose.yaml up -d db

stop:
@docker compose -f docker-compose.yaml stop ingester-first-consumer ingester-second-consumer backfiller
@docker compose -f docker-compose.yaml stop ingester-first-consumer
43 changes: 0 additions & 43 deletions api.Dockerfile

This file was deleted.

39 changes: 0 additions & 39 deletions backfiller.Dockerfile

This file was deleted.

11 changes: 0 additions & 11 deletions digital_asset_types/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,29 +12,18 @@ sql_types = ["sea-orm"]
[dependencies]
spl-concurrent-merkle-tree = { version = "0.1.3" }
sea-orm = { optional = true, version = "0.10.6", features = ["macros", "runtime-tokio-rustls", "sqlx-postgres", "with-chrono", "mock"] }
sea-query = { version = "0.28.1", features = ["postgres-array"] }
serde = { version = "1.0.137", optional = true }
serde_json = { version = "1.0.81", optional = true, features = ["preserve_order"] }
bs58 = "0.4.0"
borsh = { version = "0.9.3", optional = true }
borsh-derive = { version = "0.9.3", optional = true }
solana-sdk = { version = "1.14.10" }
num-traits = "0.2.15"
num-derive = "0.3.3"
thiserror = "1.0.31"
blockbuster = { git = "https://github.com/metaplex-foundation/blockbuster.git", rev = "552aba6a" }
jsonpath_lib = "0.3.0"
mime_guess = "2.0.4"
url = "2.3.1"
futures = "0.3.25"
reqwest = "0.11.13"
async-trait = "0.1.60"
tokio = { version = "1.22.0", features = ["full"] }
schemars = "0.8.6"
schemars_derive = "0.8.6"
log = "0.4.17"
indexmap = "1.9.3"
hex = "0.4.3"
rocks-db = { path = "../rocks-db" }
postgre-client = { path = "../postgre-client" }
entities = { path = "../entities" }
43 changes: 21 additions & 22 deletions digital_asset_types/src/dao/scopes/asset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -363,6 +363,7 @@ fn convert_rocks_offchain_data(
let ch_data: serde_json::Value = serde_json::from_str(
dynamic_data
.onchain_data
.value
.clone()
.unwrap_or_default()
.as_ref(),
Expand All @@ -376,7 +377,7 @@ fn convert_rocks_offchain_data(
metadata_url: offchain_data.url.clone(),
metadata_mutability: Mutability::Immutable,
metadata: Json::from_str(metadata.as_str()).map_err(|e| DbErr::Custom(e.to_string()))?,
slot_updated: dynamic_data.slot_updated as i64,
slot_updated: dynamic_data.get_slot_updated() as i64,
reindex: None,
})
}
Expand Down Expand Up @@ -408,46 +409,43 @@ fn convert_rocks_asset_model(
} else {
Some(leaf.tree_id.to_bytes().to_vec())
};
let slot_updated = vec![
dynamic_data.slot_updated,
owner.slot_updated,
leaf.slot_updated,
]
.into_iter()
.max()
.unwrap(); // unwrap here is save, because vec is not empty
let slot_updated = vec![owner.get_slot_updated(), leaf.slot_updated]
.into_iter()
.max()
.unwrap(); // unwrap here is safe, because vec is not empty

Ok(asset::Model {
id: static_data.pubkey.to_bytes().to_vec(),
alt_id: None,
specification_version: Some(SpecificationVersions::V1),
specification_asset_class: Some(static_data.specification_asset_class.into()),
owner: Some(owner.owner.to_bytes().to_vec()),
owner_type: owner.owner_type.into(),
delegate: owner.delegate.map(|pk| pk.to_bytes().to_vec()),
frozen: dynamic_data.is_frozen,
owner: Some(owner.owner.value.to_bytes().to_vec()),
owner_type: owner.owner_type.value.into(),
delegate: owner.delegate.value.map(|pk| pk.to_bytes().to_vec()),
frozen: dynamic_data.is_frozen.value,
supply: dynamic_data
.supply
.value
.map(|supply| supply as i64)
.unwrap_or_default(),
supply_mint: Some(static_data.pubkey.to_bytes().to_vec()),
compressed: dynamic_data.is_compressed,
compressible: dynamic_data.is_compressible,
seq: dynamic_data.seq.and_then(|u| u.try_into().ok()),
compressed: dynamic_data.is_compressed.value,
compressible: dynamic_data.is_compressible.value,
seq: dynamic_data.seq.value.and_then(|u| u.try_into().ok()),
tree_id,
leaf: leaf.leaf.clone(),
nonce: leaf.nonce.map(|nonce| nonce as i64),
royalty_target_type: static_data.royalty_target_type.into(),
royalty_target: None, // TODO
royalty_amount: dynamic_data.royalty_amount as i32,
royalty_amount: dynamic_data.royalty_amount.value as i32,
asset_data: Some(static_data.pubkey.to_bytes().to_vec()),
burnt: dynamic_data.is_burnt,
burnt: dynamic_data.is_burnt.value,
created_at: Some(static_data.created_at),
slot_updated: Some(slot_updated as i64),
data_hash: leaf.data_hash.map(|h| h.to_string()),
creator_hash: leaf.creator_hash.map(|h| h.to_string()),
owner_delegate_seq: owner.owner_delegate_seq.map(|seq| seq as i64),
was_decompressed: dynamic_data.was_decompressed,
owner_delegate_seq: owner.owner_delegate_seq.value.map(|seq| seq as i64),
was_decompressed: dynamic_data.was_decompressed.value,
leaf_seq: leaf.leaf_seq.map(|seq| seq as i64),
})
}
Expand Down Expand Up @@ -553,6 +551,7 @@ fn convert_rocks_creators_model(

dynamic_data
.creators
.value
.iter()
.enumerate()
.map(|(position, creator)| asset_creators::Model {
Expand All @@ -561,8 +560,8 @@ fn convert_rocks_creators_model(
creator: creator.creator.to_bytes().to_vec(),
share: creator.creator_share as i32,
verified: creator.creator_verified,
seq: Some(dynamic_data.slot_updated as i64),
slot_updated: Some(dynamic_data.slot_updated as i64),
seq: dynamic_data.seq.value.map(|seq| seq as i64),
slot_updated: Some(dynamic_data.get_slot_updated() as i64),
position: position as i16,
})
.collect::<Vec<_>>()
Expand Down
64 changes: 0 additions & 64 deletions docker-compose.yaml
Original file line number Diff line number Diff line change
@@ -1,14 +1,5 @@
version: "3.9"
services:
migrator:
depends_on:
- db
environment:
DATABASE_URL: postgres://solana:solana@db:5432/v1
build:
context: .
dockerfile: migrator.Dockerfile

ingester-first-consumer:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe we can drop ingester-second-consumer as well?

container_name: ingester-first-consumer
restart: always
Expand Down Expand Up @@ -54,57 +45,6 @@ services:
options:
max-size: "2048m"

backfiller:
container_name: backfiller
restart: always
entrypoint: "./backfiller"
env_file:
- .env
depends_on:
- db
build:
context: .
dockerfile: backfiller.Dockerfile
volumes:
- ./creds.json:/usr/src/app/creds.json
logging:
options:
max-size: "2048m"

backfiller-consumer:
container_name: backfiller-consumer
restart: always
entrypoint: "./backfiller_consumer"
env_file:
- .env
ports:
- "${INGESTER_BACKFILL_CONSUMER_METRICS_PORT}:${INGESTER_BACKFILL_CONSUMER_METRICS_PORT}"
depends_on:
- db
build:
context: .
dockerfile: backfiller.Dockerfile
logging:
options:
max-size: "2048m"

bg_task_runner:
container_name: bg_task_runner
restart: always
entrypoint: "./bg_task_runner"
env_file:
- .env
ports:
- "${INGESTER_BG_TASK_RUNNER_METRICS_PORT}:${INGESTER_BG_TASK_RUNNER_METRICS_PORT}"
depends_on:
- db
build:
context: .
dockerfile: ingester.Dockerfile
logging:
options:
max-size: "2048m"

db:
container_name: db
image: 'postgres:14'
Expand All @@ -121,7 +61,3 @@ services:
logging:
options:
max-size: "100m"

volumes:
grafana_data: { }
graphite_data: { }
3 changes: 2 additions & 1 deletion entities/src/enums.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,9 @@ pub enum SpecificationAssetClass {
IdentityNft,
}

#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)]
#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)]
pub enum OwnerType {
#[default]
Unknown,
Token,
Single,
Expand Down
Loading