Drop core-indexing container #314

Merged Jan 15, 2025 (9 commits)

Changes from 1 commit
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -77,7 +77,7 @@ num-traits = "0.2.17"

# Configuration, env-vars and cli parsing
figment = { version = "0.10.6", features = ["env", "toml", "yaml"] }
clap = { version = "4.2.2", features = ["derive", "cargo"] }
clap = { version = "4.2.2", features = ["derive", "cargo", "env"] }
dotenvy = "0.15.7"
indicatif = "0.17"

53 changes: 3 additions & 50 deletions docker-compose.yaml
@@ -97,50 +97,21 @@ services:
options:
max-size: "2048m"

raw-backfiller:
container_name: raw-backfiller
restart: always
entrypoint: sh -c "if [ -z '$$MALLOC_CONF' ]; then exec ./raw_backfiller; else exec ./profiling_raw_backfiller; fi"
env_file:
- .env
network_mode: host
volumes:
- ${INGESTER_ROCKS_DB_PATH}:${INGESTER_ROCKS_DB_PATH_CONTAINER}:rw
- ${INGESTER_PROFILING_FILE_PATH}:${INGESTER_PROFILING_FILE_PATH_CONTAINER}:rw
- ./creds.json:/usr/src/app/creds.json
- ./heaps:/usr/src/app/heaps:rw
stop_grace_period: 5m
build:
context: .
dockerfile: ingester.Dockerfile
logging:
options:
max-size: "2048m"

slot-persister:
container_name: slot-persister
restart: always
entrypoint: |
sh -c "
ARGS=\"--target-db-path $target_db_path\"
ARGS=\"$$ARGS --rpc-host $rpc_host\"
[ -n \"$start_slot\" ] && ARGS=\"$$ARGS --start-slot $start_slot\"
[ -n \"$big_table_credentials\" ] && ARGS=\"$$ARGS --big-table-credentials $big_table_credentials\"
[ -n \"$big_table_timeout\" ] && ARGS=\"$$ARGS --big-table-timeout $big_table_timeout\"
[ -n \"$metrics_port\" ] && ARGS=\"$$ARGS --metrics-port $metrics_port\"
[ -n \"$chunk_size\" ] && ARGS=\"$$ARGS --chunk-size $chunk_size\"
[ -n \"$max_concurrency\" ] && ARGS=\"$$ARGS --max-concurrency $max_concurrency\"

if [ -z \"$MALLOC_CONF\" ]; then
exec ./slot_persister $$ARGS
exec ./slot_persister
else
exec ./profiling_slot_persister $$ARGS
exec ./profiling_slot_persister
fi"
env_file:
- .env
network_mode: host
volumes:
- ${target_db_path}:${target_db_path}:rw
- ${TARGET_DB_PATH}:${TARGET_DB_PATH}:rw
Collaborator comment:
value missing from .env.example.
Also, some conflicts with #310 are expected.
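
For illustration, the matching .env.example entry could be something like the line below; the path is a placeholder, not taken from this PR:

TARGET_DB_PATH=/path/to/slot-persister/rocksdb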

- ${INGESTER_PROFILING_FILE_PATH}:${INGESTER_PROFILING_FILE_PATH_CONTAINER}:rw
- ${big_table_credentials:-/tmp/creds.json}:${big_table_credentials:-/tmp/creds.json}
- ./heaps:/usr/src/app/heaps:rw
@@ -152,24 +123,6 @@ services:
options:
max-size: "2048m"

core-indexing:
container_name: core-indexing
restart: always
entrypoint: sh -c "if [ -z '$$MALLOC_CONF' ]; then exec ./core_indexing; else exec ./profiling_core_indexing; fi"
env_file:
- .env
network_mode: host
volumes:
- ${INGESTER_PROFILING_FILE_PATH}:${INGESTER_PROFILING_FILE_PATH_CONTAINER}:rw
- ./heaps:/usr/src/app/heaps:rw
stop_grace_period: 5m
build:
context: .
dockerfile: ingester.Dockerfile
logging:
options:
max-size: "2048m"

db:
container_name: db
image: 'postgres:14'
6 changes: 2 additions & 4 deletions ingester.Dockerfile
@@ -36,12 +36,12 @@ RUN cargo chef cook --release --recipe-path recipe.json
# Building the services
FROM cacher AS builder
COPY . .
RUN cargo build --release --bin ingester --bin api --bin raw_backfiller --bin synchronizer --bin slot_persister
RUN cargo build --release --bin ingester --bin api --bin synchronizer --bin slot_persister

# Building the profiling feature services
FROM cacher AS builder-with-profiling
COPY . .
RUN cargo build --release --features profiling --bin ingester --bin api --bin raw_backfiller --bin synchronizer --bin slot_persister
RUN cargo build --release --features profiling --bin ingester --bin api --bin synchronizer --bin slot_persister

# Final image
FROM rust:1.76-slim-bullseye AS runtime
@@ -52,12 +52,10 @@ ENV TZ=Etc/UTC APP_USER=appuser LD_PRELOAD="/usr/local/lib/libjemalloc.so.2"
RUN groupadd $APP_USER && useradd -g $APP_USER $APP_USER && mkdir -p ${APP}

COPY --from=builder /rust/target/release/ingester ${APP}/ingester
COPY --from=builder /rust/target/release/raw_backfiller ${APP}/raw_backfiller
COPY --from=builder /rust/target/release/api ${APP}/api
COPY --from=builder /rust/target/release/synchronizer ${APP}/synchronizer
COPY --from=builder /rust/target/release/slot_persister ${APP}/slot_persister
COPY --from=builder-with-profiling /rust/target/release/ingester ${APP}/profiling_ingester
COPY --from=builder-with-profiling /rust/target/release/raw_backfiller ${APP}/profiling_raw_backfiller
COPY --from=builder-with-profiling /rust/target/release/api ${APP}/profiling_api
COPY --from=builder-with-profiling /rust/target/release/synchronizer ${APP}/profiling_synchronizer
COPY --from=builder-with-profiling /rust/target/release/slot_persister ${APP}/profiling_slot_persister
18 changes: 9 additions & 9 deletions nft_ingester/src/bin/slot_persister/main.rs
@@ -41,40 +41,40 @@ const SLOT_COLLECTION_OFFSET: u64 = 300;
)]
struct Args {
/// Path to the target RocksDB instance with slots
#[arg(short, long)]
#[arg(short, long, env)]
Collaborator comment:
specify the Env name here?
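
A minimal sketch of what an explicit env var name would look like with clap's env feature (the variable names and this snippet are illustrative, not part of this PR):

use std::path::PathBuf;

use clap::Parser;

#[derive(Parser)]
struct Args {
    /// Path to the target RocksDB instance with slots
    // Named explicitly; with a bare `env`, clap derives the variable name
    // from the field name instead.
    #[arg(short, long, env = "TARGET_DB_PATH")]
    target_db_path: PathBuf,

    /// RPC host
    #[arg(short, long, env = "RPC_HOST")]
    rpc_host: String,
}

fn main() {
    // CLI flags take precedence over the env vars, which in turn take
    // precedence over any defaults.
    let args = Args::parse();
    println!("{} {}", args.target_db_path.display(), args.rpc_host);
}

This relies on the "derive" and "env" features of clap, which the Cargo.toml change in this PR already enables.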

target_db_path: PathBuf,

/// RPC host
#[arg(short, long)]
#[arg(short, long, env)]
rpc_host: String,

/// Optional starting slot number, this will override the last saved slot in the RocksDB
#[arg(short, long)]
#[arg(short, long, env)]
start_slot: Option<u64>,

/// Big table credentials file path
#[arg(short, long)]
#[arg(short, long, env)]
big_table_credentials: Option<String>,

/// Optional big table timeout (default: 1000)
#[arg(short = 'B', long, default_value_t = 1000)]
#[arg(short = 'B', long, env, default_value_t = 1000)]
big_table_timeout: u32,

/// Metrics port
/// Default: 9090
#[arg(short, long, default_value = "9090")]
#[arg(short, long, env, default_value = "9090")]
metrics_port: u16,

/// Number of slots to process in each batch
#[arg(short, long, default_value_t = 200)]
#[arg(short, long, env, default_value_t = 200)]
chunk_size: usize,

/// Maximum number of concurrent requests
#[arg(short = 'M', long, default_value_t = 20)]
#[arg(short = 'M', long, env, default_value_t = 20)]
max_concurrency: usize,

/// Optional comma-separated list of slot numbers to check
#[arg(long)]
#[arg(long, env)]
slots: Option<String>,
}
pub struct InMemorySlotsDumper {