diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b521497a4..77e09e9ef 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -99,3 +99,14 @@ jobs: - build - unit-tests - e2e-tests + + build-localnet-image: + uses: ./.github/workflows/docker-build-localnet-image.yaml + secrets: inherit + # Dependencies are not strictly necessary, but if fendermint tests pass they publish docker too, so they better work. + # It is because of these needs that all the filters are allowed to run on `main` too, otherwise this would be disabled. + # It could be done in a more granular approach inside the workflows to allow the job to pass but opt-out of testing, + # but I guess it doesn't hurt to run a final round of unconditional tests, even though it takes longer to publish. + if: github.ref == 'refs/heads/main' + needs: + - docker-publish diff --git a/.github/workflows/docker-build-localnet-image.yaml b/.github/workflows/docker-build-localnet-image.yaml new file mode 100644 index 000000000..a4d05da4b --- /dev/null +++ b/.github/workflows/docker-build-localnet-image.yaml @@ -0,0 +1,39 @@ +name: Build Localnet Image +on: + # Runs on non-main branches where the fendermint image must be built locally + pull_request: + + # runs on main branch after the fendermint image has been published + workflow_call: + +jobs: + build-localnet-image: + runs-on: self-hosted + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_HUB_USERNAME }} + password: ${{ secrets.DOCKER_HUB_PASSWORD }} + + - name: Install Nushell + run: ./deployment/set-up-nu.sh + + - name: Build Localnet Image + working-directory: ./deployment + run: | + set -eux + + source ./.nu/activate.sh + + if [ "${{ github.ref }}" == "refs/heads/main" ]; then + hash=$(git rev-parse --short=7 HEAD) + flags="--fendermint-image textile/fendermint:sha-$hash --push-multi-arch-tags 
textile/recall-localnet:sha-$hash,textile/recall-localnet:latest" + else + flags="--rebuild-fendermint-image --local-image-tag recall-localnet" + fi + + ./localnet.nu build-docker-image $flags --node-count 2 diff --git a/.github/workflows/docker-publish.yaml b/.github/workflows/docker-publish.yaml index 592989845..b83358887 100644 --- a/.github/workflows/docker-publish.yaml +++ b/.github/workflows/docker-publish.yaml @@ -78,22 +78,3 @@ jobs: export BUILDX_STORE="--push" export BUILDX_FLAGS="--platform linux/amd64,linux/arm64" cd fendermint && make docker-build - - # Publish Localnet image using the Fendermint image built above - publish_localnet: - name: Publish Localnet image - runs-on: self-hosted - needs: publish - steps: - - name: Trigger localnet image publish in recall-deploy - run: | - echo "Using Fendermint commit hash: ${{ needs.publish.outputs.commit-hash }}" - curl -X POST --location "https://api.github.com/repos/recallnet/recall-deploy/dispatches" \ - --header "Authorization: token ${{ secrets.PA_TOKEN }}" \ - --header "Accept: application/vnd.github.v3+json" \ - --data '{ - "event_type": "remote-trigger", - "client_payload": { - "fendermint-sha": "${{ needs.publish.outputs.commit-hash }}" - } - }' diff --git a/deployment/.gitignore b/deployment/.gitignore new file mode 100644 index 000000000..08994ed90 --- /dev/null +++ b/deployment/.gitignore @@ -0,0 +1,3 @@ +/.nu/ +/localnet-data* +/testnet-data-* diff --git a/deployment/README.md b/deployment/README.md new file mode 100644 index 000000000..4141e367e --- /dev/null +++ b/deployment/README.md @@ -0,0 +1,12 @@ +# Localnet Scripts + +## Install nushell +If you do not have nushell installed, you can call `./set-up-nu.sh` that will download the required nushell version to `./.nu`. +You can add `nu` to your `PATH` with `source ./.nu/activate.sh` + +## Usage +You can run the localnet in two ways: +* `./localnet.nu run` - runs all localnet services on the local docker. See `./localnet.nu run -h` for details. 
+* `./localnet.nu run-dind` - downloads the latest `textile/recall-localnet` docker image and runs all services inside a single container. This is faster than the previous option.
+
+See `./localnet.nu -h` for details.
diff --git a/deployment/docker/anvil.Dockerfile b/deployment/docker/anvil.Dockerfile
new file mode 100644
index 000000000..2e42bf68c
--- /dev/null
+++ b/deployment/docker/anvil.Dockerfile
@@ -0,0 +1,13 @@
+FROM ubuntu:24.04
+
+RUN set -x; \
+    arch=$(uname -m | sed -e s/aarch64/arm64/ -e s/x86_64/amd64/); \
+    apt update && apt install -y curl && \
+    curl -Lo /tmp/tt.tgz https://github.com/foundry-rs/foundry/releases/download/stable/foundry_stable_linux_${arch}.tar.gz && \
+    tar xvf /tmp/tt.tgz -C /usr/bin && \
+    rm /tmp/tt.tgz
+
+RUN mkdir -p /workdir
+WORKDIR /workdir
+
+ENTRYPOINT ["anvil", "--host", "0.0.0.0", "--state", "/workdir/state"]
diff --git a/deployment/docker/entrypoint-localnet.sh b/deployment/docker/entrypoint-localnet.sh
new file mode 100755
index 000000000..5f20a1e54
--- /dev/null
+++ b/deployment/docker/entrypoint-localnet.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+set -ex
+
+# Set up signal handling for graceful shutdown
+function cleanup() {
+    echo "Received termination signal. Shutting down containers..."
+    cd /workdir/localnet-data
+    for d in node-*; do
+        (
+            cd $d/workdir
+            docker compose down
+        )
+    done
+    docker stop localnet-anvil
+    docker network rm recall-localnet
+    pkill -TERM dockerd
+    exit 0
+}
+
+# Register the cleanup function for these signals
+trap cleanup SIGTERM SIGINT
+
+nohup dockerd &> /dev/null &
+DOCKERD_PID=$!
+while ! 
docker info > /dev/null; do + sleep 1 +done + +docker build -t anvil -f ./docker/anvil.Dockerfile ./docker +docker network create recall-localnet || true +docker run --rm --name localnet-anvil -u nobody -d --network recall-localnet -p 0.0.0.0:8545:8545 -v /workdir/localnet-data/anvil:/workdir anvil + +cd localnet-data +for d in node-*; do + ( + cd $d/workdir + docker compose up -d + ) +done + +# Keep container running until terminated +echo "All containers started. Waiting for termination signal..." +wait $DOCKERD_PID diff --git a/deployment/docker/localnet.Dockerfile b/deployment/docker/localnet.Dockerfile new file mode 100644 index 000000000..cbaaaf13b --- /dev/null +++ b/deployment/docker/localnet.Dockerfile @@ -0,0 +1,17 @@ +FROM docker + +RUN apk add bash + +RUN mkdir -p /workdir +WORKDIR /workdir + +COPY docker /workdir/docker +COPY localnet-data /workdir/localnet-data +RUN chown -R nobody:nobody /workdir +ENV RECALL_NODE_USER=nobody +RUN ls -la /workdir/localnet-data/anvil/ + +# This is needed to expose DIND endpoints correctly from inside the localnet container +ENV LOCALNET_CLI_BIND_HOST=0.0.0.0 + +ENTRYPOINT ["./docker/entrypoint-localnet.sh"] diff --git a/deployment/docker/subnet-setup.Dockerfile b/deployment/docker/subnet-setup.Dockerfile new file mode 100644 index 000000000..c4b02fb58 --- /dev/null +++ b/deployment/docker/subnet-setup.Dockerfile @@ -0,0 +1,34 @@ +# This image is used to set up a subnet. +# It contains the required ipc-cli and fendermint versions and the foundry tools. 
+ +ARG fendermint_image + +FROM $fendermint_image + +# Install foundry +RUN set -ex; \ + arch=$(uname -m | sed -e s/aarch64/arm64/ -e s/x86_64/amd64/); \ + curl -Lo /tmp/tt.tgz https://github.com/foundry-rs/foundry/releases/download/stable/foundry_stable_linux_${arch}.tar.gz && \ + tar xvf /tmp/tt.tgz -C /bin && \ + rm /tmp/tt.tgz + + +ARG NODE_VERSION=22.14.0 + +# Install build tools +RUN set -ex; \ + apt update; \ + apt install -y git make python3 build-essential; \ + curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh | bash; + +RUN set -ex; \ + \. $HOME/.nvm/nvm.sh; \ + nvm install $NODE_VERSION; \ + nvm use v$NODE_VERSION; \ + nvm alias default v$NODE_VERSION; + +ENV PATH="/fendermint/.nvm/versions/node/v${NODE_VERSION}/bin/:${PATH}" + +RUN npm install -g pnpm + +ENTRYPOINT ["/bin/bash"] diff --git a/deployment/lib/local-files.nu b/deployment/lib/local-files.nu new file mode 100644 index 000000000..b1329a376 --- /dev/null +++ b/deployment/lib/local-files.nu @@ -0,0 +1,128 @@ + +export def add-subnet-to-ipc-config [] { + open $env.state.config.ipc_config_file | + update subnets { $in | append { + id: $env.state.subnet_id + config: { + network_type: "fevm" + provider_http: $env.state.config.subnet.rpc_url + gateway_addr: "0x77aa40b105843728088c0132e43fc44348881da8" + registry_addr: "0x74539671a1d2f1c8f200826baba665179f53a1b7" + } + }} | + to toml | + save -f $env.state.config.ipc_config_file +} + +export def write-subnet-config [dest: string, --bootstrap] { + mut cfg = { + address_network: "testnet" + parent_chain: { + chain_id: $env.state.config.parent_chain.chain_id + addresses: { + gateway: $env.state.gateway_address + registry: $env.state.registry_address + supply_source: $env.state.supply_source_address + subnet_contract: $env.state.subnet_eth_address + validator_gater: $env.state.validator_gater_address + validator_rewarder: $env.state.validator_rewarder_address + } + } + subnet: { + subnet_id: $env.state.subnet_id + chain_id: 
$env.state.config.subnet.chain_id + } + endpoints: { + cometbft_rpc_servers: $env.state.config.subnet.cometbft_rpc_servers + cometbft_persistent_peers: ($env.state.peers?.cometbft | default [] | uniq) + fendermint_seeds: ($env.state.peers?.fendermint | default [] | uniq) + } + } + + if not $bootstrap { + $cfg = ($cfg | merge deep { + subnet: { + addresses: { + credit_manager: $env.state.creditManager_contract_address + bucket_manager: $env.state.bucketManager_contract_address + faucet_contract: $env.state.faucet_contract_address + blob_manager: $env.state.blobManager_contract_address + } + } + endpoints: { + evm_rpc_url: $env.state.config.subnet.rpc_url + } + }) + } + + $cfg | save -f $dest +} + +export def build-setup-docker-image [] { + cd docker + docker build ...[ + --build-arg $"fendermint_image=($env.state.config.fendermint_image)" + -t $env.state.config.setup_image + -f subnet-setup.Dockerfile . + ] +} + +export def build-fendermint-image [] { + if $env.state.config.fendermint_image == "fendermint" { + cd ../fendermint + make docker-build + } +} + +export def set-fendermint-image [docker_compose_dir: string] { + cd $"($docker_compose_dir)/config" + let f = "node-default.toml" + open $f | update images.fendermint $env.state.config.fendermint_image | save -f $f +} + +# Write network config suitable for recall CLI into workdir. 
+export def write-recall-cli-config [] { + let endpoints = match $env.state.config.network { + "localnet" => ({ + subnet_config: { + rpc_url: "http://localhost:26657" + object_api_url: "http://localhost:8001" + evm_rpc_url: "http://localhost:8645" + } + parent_network_config: { + evm_rpc_url: "http://localhost:8545" + } + }) + "testnet" => { + let base = $"($env.state.config.version).node-0.testnet.recall.network" + { + subnet_config: { + rpc_url: $"https://api.($base)" + object_api_url: $"https://objects.($base)" + evm_rpc_url: $"https://evm.($base)" + } + parent_network_config: { + evm_rpc_url: "https://api.calibration.node.glif.io" + } + } + } + } + + let contracts = { + subnet_config: { + chain_id: $env.state.config.subnet.chain_id + subnet_id: $env.state.subnet_id + evm_gateway_address: "0x77aa40b105843728088c0132e43fc44348881da8" + evm_registry_address: "0x74539671a1d2f1c8f200826baba665179f53a1b7" + } + parent_network_config: { + evm_gateway_address: $env.state.gateway_address + evm_registry_address: $env.state.registry_address + evm_supply_source_address: $env.state.supply_source_address + } + } + + let cfg = {} | insert $env.state.config.network ($endpoints | merge deep $contracts) + + $cfg | save -f ($env.state.config.workdir | path join "networks.toml") +} diff --git a/deployment/lib/localnet.nu b/deployment/lib/localnet.nu new file mode 100644 index 000000000..93e8f6961 --- /dev/null +++ b/deployment/lib/localnet.nu @@ -0,0 +1,259 @@ +use ./local-files.nu +use ./state-engine.nu +use ./util.nu * + +export def run-localnet-node [ + ix: int, # node index + dc_repo: string, # recall-docker-compose repo to clone + dc_branch: string, # recall-docker-compose branch + --bootstrap, # run only essential services required to deploy subnet contracts + ] { + + let node_name = $"node-($ix)" + let node_dir = ($env.state.config.workdir | path join $node_name | path expand) + let repo = if ($dc_repo | str starts-with "..") { $dc_repo | path expand} else { $dc_repo } + 
mkdir $node_dir + cd $node_dir + if ($node_dir | path join ".git" | path exists) { + git checkout $dc_branch + git pull + } else { + git clone --branch $dc_branch $repo . + } + + local-files write-subnet-config ($node_dir | path join "config/network-localnet.toml") --bootstrap=$bootstrap + write-localnet-node-config $ix $bootstrap + ./init-workdir ./workdir + do { + cd ./workdir + docker compose up -d + } + + let ids = (./workdir/node-tools show-peer-ids | from yaml) + do $env.state.update { + peers: { + cometbft: [$"($ids.cometbft_id)@localnet-($node_name)-cometbft-1:26656"] + fendermint: [$"/dns/localnet-($node_name)-fendermint-1/tcp/26655/p2p/($ids.fendermint_id)"] + } + } +} + +export def stop-node [ix: int] { + let node_name = $"node-($ix)" + let node_dir = ($env.state.config.workdir | path join $node_name | path expand) + + if ($node_dir | path exists) { + cd ($node_dir + "/workdir") + docker compose down + } else { + echo $"Directory ($node_dir) does not exist, skipping." + } +} + +def write-localnet-node-config [node_ix: int, bootstrap: bool] { + let node_name = $"node-([$node_ix 0] | math max)" + let output_file = ($env.state.config.workdir | path join $node_name| path join "config/node.toml") + + let enable = { + sync: ($node_ix > 0) + relayer: ($node_ix == 0) + registrar: ($node_ix == 0 and not $bootstrap) + s3: ($node_ix == 0 and not $bootstrap) + } + mut cfg = { + network_name: localnet + node_name: $node_name + project_name: $"localnet-($node_name)" + node_private_key: ($env.state | get $"validator($node_ix)" | get private_key) + + images: { + fendermint: $env.state.config.fendermint_image + } + parent_endpoint: { + url: "http://localnet-anvil:8545" + } + networking: { + docker_network_subnet: $"10.222.($node_ix).0/24" + host_bind_ip: "" + } + services: { + cometbft_statesync_enable: $enable.sync + relayer_checkpoint_interval_sec: 10 + } + localnet: { + enable: true + network: recall-localnet + } + } + + if $node_ix == 0 { + $cfg = ($cfg | merge deep 
{ + localnet: { + cli_bind_host: "127.0.0.1" + } + }) + } + if $enable.relayer { + $cfg = ($cfg | merge deep { + relayer: { + enable: true + + # FIXME: This is 3rd anvil key. Make it dynamic. + private_key: "0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6" + } + }) + } + if $enable.registrar { + $cfg = ($cfg | merge deep { + registrar: { + enable: true + faucet_owner_private_key: $env.state.faucet_owner.private_key + turnstile_secret_key: nonsense + trusted_proxy_ips: "10.0.0.0" # dummy + } + }) + } + if $enable.s3 { + $cfg = ($cfg | merge deep { + recall_s3: { + enable: true + access_key: "user1" + secret_key: "hello-recall" + domain: "localhost" + } + }) + } + + $cfg | save -f $output_file +} + +export def run-anvil [workdir: string] { + docker build -t anvil -f ./docker/anvil.Dockerfile ./docker + let found = (docker network ls | lines | find "recall-localnet" | length) + if $found == 0 { + docker network create recall-localnet + } + let anvil_dir = $"($workdir)/anvil" + mkdir $anvil_dir + docker run ...[ + --rm -d + -u $"(id -u):(id -g)" + --name localnet-anvil + -p 127.0.0.1:8545:8545 + --network recall-localnet + -v $"($anvil_dir):/workdir" + anvil + ] +} + +export def stop-anvil [] { + # We want a graceful stop so that anvil can dump all its state to the state file. + # Without this wait anvil can hang on termination. 
+ sleep 1sec + docker stop localnet-anvil + + # Verify the state file was created + if not ( $env.state.config.workdir | path join "anvil/state" | path exists) { + print "ERROR: anvil failed to dump its state" + exit 5 + } +} + +export def stop-network [workdir: string, --force] { + if $force { + docker ps -a --format json | lines | each {from json} | where Names =~ $"localnet-" | each {docker rm -f $in.ID} + } else { + glob ($workdir + "/node-*") | reverse | each {|dir| + cd ($dir | path join "workdir") + docker compose down + } + + let state = state-engine read-state (state-file $workdir) + if (docker ps | lines | find localnet-anvil | length) == 1 { + stop-anvil + } + do $state.update { graceful_shutdown: true} + } +} + +export def build-dind-image [local_image_tag: any, push_multi_arch_tags: any] { + let found = (docker buildx ls | lines | find "multi-arch-builder" | length) + if $found == 0 { + docker buildx create --name multi-arch-builder --driver docker-container + } + + def build-local [tag:string] { + docker buildx build -t $tag --load -f docker/localnet.Dockerfile . + } + + if ($local_image_tag | is-not-empty) { + build-local $local_image_tag + } else if ($push_multi_arch_tags | is-not-empty) { + let tags = $push_multi_arch_tags | split row ',' | each {|tag| [-t $tag]} | flatten + docker buildx build --builder=multi-arch-builder --platform linux/amd64,linux/arm64 --push ...$tags -f docker/localnet.Dockerfile . + } else { + build-local recall-localnet + } +} + +export def wait-for-sync [ ix: int ] { + loop { + print "== calling cometbft..." + let result = (run-in-container curl '-s' '-m' 2 $"http://localnet-node-($ix)-cometbft-1:26657/status" | complete) + if $result.exit_code == 0 { + let cu = ($result.stdout | from json | get result.sync_info.catching_up ) + if $cu == false { + break + } + } else { + print $"== stdout: ($result.stdout)" + print $"== stderr: ($result.stderr)" + } + print "waiting for sync..." 
+ sleep 10sec + } +} + +export def wait-for-cometbft [ ix: int ] { + loop { + print "== calling cometbft..." + let result = (run-in-container curl '-s' '-m' 2 $"http://localnet-node-($ix)-cometbft-1:26657/status" | complete) + if $result.exit_code == 0 { + let block_height = ($result.stdout | from json | get result.sync_info.latest_block_height ) + if ($block_height | str length) > 0 and ($block_height | into int) > 0 { + break + } + } + print "waiting for cometbft..." + sleep 2sec + } +} + +# Create workdir and the config file +export def init-state [ + workdir: string, + fendermint_image: string, + --parent-rpc-url: string = "http://localnet-anvil:8545", + ] { + + let base_config = (get-base-config $workdir "localnet" $fendermint_image) + let cfg = { + bottomup_check_period: 10 + docker_network: "recall-localnet" + parent_chain: { + rpc_url: $parent_rpc_url + gas_estimate_multiplier: 10000 + network: "localnet" + chain_id: 31337 + } + subnet: { + chain_id: 248163216 + rpc_url: "http://localnet-node-0-ethapi-1:8545" + cometbft_rpc_servers: [ "http://localnet-node-0-cometbft-1:26657" ] + } + } + let state = { + config: ($base_config | merge $cfg) + } + state-engine update-state ($workdir | path join "state.yml") $state +} diff --git a/deployment/lib/parent-chain.nu b/deployment/lib/parent-chain.nu new file mode 100644 index 000000000..bbb538b2d --- /dev/null +++ b/deployment/lib/parent-chain.nu @@ -0,0 +1,315 @@ +use util.nu * + +def rpc-url [] { + if ("token" in $env.state.config.parent_chain) { + $"($env.state.config.parent_chain.rpc_url)?token=($env.state.config.parent_chain.token)" + } else { + $env.state.config.parent_chain.rpc_url + } +} + +def balance [addr: string] { + cast balance --rpc-url (rpc-url) $addr | into float +} + +export def ensure-funds [address: string] { + let b = (balance $address) + log $"Balance of ($address): ($b / 1e18)" + if $b == 0 { + print $"(ansi green)==== Goto to https://faucet.calibnet.chainsafe-fil.io/funds.html and fund 
($address) (ansi reset)" + } else { + return + } + loop { + sleep 15sec + let b = (balance $address) + log $"balance: ($b / 1e18)" + if $b == 0 { + log "waiting for funds" + } else { + break + } + } +} + +export def write-ipc-cli-config [] { + mkdir $env.state.config.ipc_config_dir + + let gateway_addr = if "gateway_address" in $env.state {$env.state.gateway_address} else {"0x0000000000000000000000000000000000000000"} + let registry_addr = if "registry_address" in $env.state {$env.state.registry_address} else {"0x0000000000000000000000000000000000000000"} + + let token = if ("token" in $env.state.config.parent_chain) {{auth_token: $env.state.config.parent_chain.token}} else {{}} + let calibration = { + id: $"/r($env.state.config.parent_chain.chain_id)" + config: ({ + network_type: "fevm" + provider_http: $env.state.config.parent_chain.rpc_url + gateway_addr: $gateway_addr + registry_addr: $registry_addr + } | merge $token) + } + + let cfg = { + keystore_path: "/fendermint/.ipc" + subnets: [$calibration] + } + $cfg | save -f $env.state.config.ipc_config_file + + do $env.state.update {parent: $calibration} +} + +export def deploy-validator-gater [] { + run-in-container ...[ + $"cd ($env.state.config.docker_ipc_src_dir)/recall-contracts;" + "forge clean;" + "forge install;" + forge script script/ValidatorGater.s.sol + --private-key $env.state.validator0.private_key + --rpc-url (rpc-url) + --tc DeployScript --sig "'run()'" + --broadcast -g (get-gas-estimate-multiplier) -vv + ] + + let validator_gater = (open $"($env.state.config.ipc_src_dir)/recall-contracts/broadcast/ValidatorGater.s.sol/($env.state.config.parent_chain.chain_id)/run-latest.json" | + get transactions | where contractName == ERC1967Proxy | get 0.contractAddress) + do $env.state.update {validator_gater_address: $validator_gater} +} + +export def approve-validator-power [validator, min: float, max: float] { + cast-retry "approve-validator-power" [ + send --private-key $env.state.validator0.private_key + 
--rpc-url (rpc-url) + --timeout 120 + --confirmations 10 + $env.state.validator_gater_address + "'approve(address,uint256,uint256)'" + $validator.address + $min $max + ] +} + +export def create-subnet [] { + let result = (run-in-container ipc-cli ...[ + subnet create + --from $env.state.validator0.address + --parent $"/r($env.state.config.parent_chain.chain_id)" + --min-validators 0 + --min-validator-stake 1 + --bottomup-check-period $env.state.config.bottomup_check_period + --active-validators-limit 40 + --permission-mode collateral + --supply-source-kind erc20 + --supply-source-address $env.state.supply_source_address + --validator-gater $env.state.validator_gater_address + --validator-rewarder $env.state.validator_rewarder_address + --collateral-source-kind erc20 + --collateral-source-address $env.state.supply_source_address + ] | complete) + if $result.exit_code != 0 { + print $result.stdout + print $result.stderr + exit $result.exit_code + } + let subnet_id = ($result.stdout | tee { print }| lines | find "created subnet actor" | get 0 | str replace -r '.*\/r' '/r' | str trim) + + do $env.state.update { + subnet_id: $subnet_id + subnet_eth_address: (f4-to-eth ($subnet_id | split row '/' | get 2)) + } +} + +export def set-subnet-in-validator-gater [] { + cast-retry "set-subnet-in-validator-gater" [ + send --private-key $env.state.validator0.private_key + --rpc-url (rpc-url) + --timeout 120 + --confirmations 10 + $env.state.validator_gater_address + "'setSubnet((uint64,address[]))'" + $"'\(($env.state.config.parent_chain.chain_id), [($env.state.subnet_eth_address)])'" + ] +} + +export def approve-subnet-contract [validator, amount: float] { + cast-retry "approve-subnet-contract" [ + send --private-key $validator.private_key + --rpc-url (rpc-url) + --timeout 120 + --confirmations 10 + $env.state.supply_source_address + "'approve(address,uint256)'" + $env.state.subnet_eth_address $amount + ] +} + +export def prefund-validator [validator, amount: float] { + 
cast-retry "prefund-validator" [
+        send --private-key $validator.private_key
+        --rpc-url (rpc-url)
+        --timeout 120
+        --confirmations 10
+        $env.state.subnet_eth_address
+        "'preFund(uint256)'"
+        $amount
+    ]
+}
+
+export def join-subnet [validator, stake: int] {
+    run-in-container ipc-cli ...[
+        subnet join
+        --from $validator.address
+        --subnet $env.state.subnet_id
+        --collateral $stake
+    ]
+}
+
+export def stake [validator, stake: int] {
+    run-in-container ipc-cli ...[
+        subnet stake
+        --from $validator.address
+        --subnet $env.state.subnet_id
+        --collateral $stake
+    ]
+}
+
+export def unstake [validator, stake: int] {
+    run-in-container ipc-cli ...[
+        subnet unstake
+        --from $validator.address
+        --subnet $env.state.subnet_id
+        --collateral $stake
+    ]
+}
+
+# Anvil does not tolerate too high gas estimate multipliers in some cases.
+def get-gas-estimate-multiplier [] {
+    let s = $env.state
+    if $s.config.network == "localnet" {
+        130
+    } else {
+        $s.config.parent_chain.gas_estimate_multiplier
+    }
+}
+
+export def deploy-supply-source [] {
+    run-in-container ...[
+        $"cd ($env.state.config.docker_ipc_src_dir)/recall-contracts;"
+        'forge clean;'
+        'forge install;'
+        forge script script/Recall.s.sol
+        --private-key $env.state.supply_source_owner.private_key
+        --rpc-url (rpc-url)
+        --tc DeployScript
+        --sig "'run()'"
+        --broadcast
+        -g (get-gas-estimate-multiplier)
+        -vv
+    ]
+
+    let supply_source = (open $"($env.state.config.ipc_src_dir)/recall-contracts/broadcast/Recall.s.sol/($env.state.config.parent_chain.chain_id)/run-latest.json" | get returns | values | where internal_type == "contract Recall" | get 0.value)
+
+    do $env.state.update {supply_source_address: $supply_source }
+}
+
+export def deploy-validator-rewarder [] {
+    run-in-container ...[
+        $"cd ($env.state.config.docker_ipc_src_dir)/recall-contracts;"
+        "forge clean;"
+        "forge install;"
+        forge script script/ValidatorRewarder.s.sol
+        --private-key $env.state.validator0.private_key
+        --rpc-url (rpc-url)
+        --tc 
DeployScript + --sig "'run(address)'" $env.state.supply_source_address + --broadcast -g (get-gas-estimate-multiplier) -vv + ] + + let contract_address = (open $"($env.state.config.ipc_src_dir)/recall-contracts/broadcast/ValidatorRewarder.s.sol/($env.state.config.parent_chain.chain_id)/run-latest.json" | get returns | values | where internal_type == "contract ValidatorRewarder" | get 0.value) + + do $env.state.update {validator_rewarder_address: $contract_address } +} + +export def "prepare-contract-stack-deployment" [] { + run-in-container ...[ + "set -ex;" + $"rm -rf ($env.state.config.docker_ipc_src_dir)/node_modules;" + cd $"($env.state.config.docker_ipc_src_dir)/contracts;" + rm -rf "deployments;" + "npm install;" + ] +} + +export def "deploy-contract-stack" [] { + mut denv = { + REGISTRY_CREATION_PRIVILEGES: "unrestricted" + RPC_URL: (rpc-url) + PRIVATE_KEY: $env.state.supply_source_owner.private_key + } + mut stack_network = $env.state.config.parent_chain.network + if $env.state.config.network == "localnet" { + $stack_network = "auto" + $denv = ($denv | merge { + CHAIN_ID: $env.state.config.parent_chain.chain_id + RPC_URL: $env.state.config.parent_chain.rpc_url + }) + } + let out = (run-in-container --denv $denv ...[ + "set -ex;" + cd $"($env.state.config.docker_ipc_src_dir)/contracts;" + make deploy-stack $"NETWORK=($stack_network)" + ] | tee {print} | lines) + + def extract_field [pattern] { + $out | find $pattern | ansi strip | split row -r '\s+' | get 3 | str trim + } + + do $env.state.update { + gateway_address: (extract_field 'GatewayDiamond deployed') + registry_address: (extract_field 'SubnetRegistryDiamond deployed') + } +} + +export def grant-minter-role [address: string] { + let minter_role = (run-in-container cast keccak "MINTER_ROLE") + cast-retry "grant-minter-role" [ + send --private-key $env.state.supply_source_owner.private_key + --rpc-url (rpc-url) + --timeout 120 + $env.state.supply_source_address + "'grantRole(bytes32,address)'" 
$minter_role $address + ] +} + +export def send-funds [dest, amount: float, --from-private-key: string] { + let pk = if ($from_private_key | is-empty) {$env.state.supply_source_owner.private_key} else {$from_private_key} + cast-retry "send-funds" [ + send --private-key $pk + --rpc-url (rpc-url) + --timeout 120 + --confirmations 10 + --value $amount + $dest.address + ] +} + +export def mint-erc20 [address: string, amount: float] { + cast-retry "mint-erc20" [ + send --private-key $env.state.supply_source_owner.private_key + --rpc-url (rpc-url) + --timeout 120 + --confirmations 10 + $env.state.supply_source_address + "'mint(address,uint256)'" + $address + $amount + ] +} + +export def cross-msg-to-subnet [address: string, amount: float] { + run-in-container ipc-cli ...[ + cross-msg fund-with-token + --from $address + --subnet $env.state.subnet_id + --approve $amount + ] +} diff --git a/deployment/lib/state-engine.nu b/deployment/lib/state-engine.nu new file mode 100644 index 000000000..d89a578fa --- /dev/null +++ b/deployment/lib/state-engine.nu @@ -0,0 +1,44 @@ +# Runs steps and stores the state into the given file. +# Each step runs only once. +export def run [ + state_file: string, + steps: list, # list of records (name, fn), where fn takes $state as the argument and returns a state patch record. 
+ --log-prefix: string, + ] { + def log [str: string] { + print $"(ansi '#f58c5f')== [step ($log_prefix | default "")] ($str)(ansi reset)" + } + + $steps | each { |step| + let state = read-state $state_file + if ($step.name not-in $state.completed_steps) { + log $step.name + do $step.fn $state + update-state $state_file {completed_steps: ($state.completed_steps | insert $step.name true)} + if ("ONE_STEP" in $env) { + exit 0 + } + } + } + log "== done ==" +} + +export def --env read-state [state_file: string] { + let update = {|patch| update-state $state_file $patch} + let state = (try { + open $state_file + } catch { + { + completed_steps: {} + } + }) | merge {update: $update} + $env.state = $state + $state +} + +export def update-state [state_file: string, patch: record] { + read-state $state_file | + merge deep --strategy=append $patch | + reject update | + save -f $state_file +} diff --git a/deployment/lib/steps.nu b/deployment/lib/steps.nu new file mode 100644 index 000000000..6d74d16cf --- /dev/null +++ b/deployment/lib/steps.nu @@ -0,0 +1,82 @@ +use ./util.nu +use ./parent-chain.nu +use ./local-files.nu +use ./subnet.nu + + +export def get-create-subnet-steps [get_funds_fn: closure] { + [ + { name: "build_setup_image" fn: { local-files build-setup-docker-image} } + { name: "create_ipc_config" fn: { parent-chain write-ipc-cli-config }} + + { name: "create_supply_source_owner_key" fn: { util create-wallet "supply_source_owner"} } + { name: "supply_source_owner_ensure_funds" fn: $get_funds_fn } + + { name: "deploy_supply_source" fn: { parent-chain deploy-supply-source } } + + { name: "prepare_gateway_and_registry" fn: { parent-chain prepare-contract-stack-deployment } } + { name: "deploy_gateway_and_registry" fn: { parent-chain deploy-contract-stack } } + { name: "update_ipc_config" fn: { parent-chain write-ipc-cli-config }} + + { name: "create_validator0_key" fn: { util create-wallet "validator0"} } + { name: "send_funds_to_validator0" fn: { parent-chain 
send-funds $env.state.validator0 20e18} } + { name: "deploy_validator_rewarder" fn: { parent-chain deploy-validator-rewarder } } + { name: "rewarder_grant_minter_role" fn: { parent-chain grant-minter-role $env.state.validator_rewarder_address }} + { name: "deploy_validator_gater" fn: { parent-chain deploy-validator-gater} } + { name: "mint_erc20" fn: { parent-chain mint-erc20 $env.state.validator0.address 1001e18} } + { name: "approve_validator_power" fn: { parent-chain approve-validator-power $env.state.validator0 1e18 1000e18} } + { name: "create_subnet" fn: { parent-chain create-subnet } } + { name: "set_subnet_in_validator_gater" fn: { parent-chain set-subnet-in-validator-gater} } + { name: "approve_subnet_contract" fn: { parent-chain approve-subnet-contract $env.state.validator0 1000e18} } + { name: "prefund_validator" fn: { parent-chain prefund-validator $env.state.validator0 100e18} } + { name: "join_subnet" fn: { parent-chain join-subnet $env.state.validator0 10} } + { name: "transfer_funds" fn: { parent-chain cross-msg-to-subnet $env.state.validator0.address 5e18} } + { name: "write_recall_subnet_config" fn: { local-files write-recall-cli-config } } + ] +} + +export def get-deploy-subnet-contracts-steps [set_up_contract_owner_steps: list] { + [ + { name: "wait_for_subnet" fn: { subnet wait-for-subnet} } + { name: "add_subnet_to_ipc_config" fn: { local-files add-subnet-to-ipc-config} } + + { name: "faucet_create_key" fn: { util create-wallet "faucet_owner"} } + { name: "send_funds_to_faucet_owner" fn: { parent-chain send-funds $env.state.faucet_owner 3e18} } + { name: "faucet_mint_erc20" fn: { parent-chain mint-erc20 $env.state.faucet_owner.address 1e30} } + { name: "faucet_transfer_tokens_to_subnet" fn: { parent-chain cross-msg-to-subnet $env.state.faucet_owner.address (1e30 - 2e18)} } + { name: "faucet_wait_for_funds" fn: { util wait-for-funds-on-subnet $env.state.faucet_owner.address} } + { name: "deploy_faucet_contract" fn: { subnet 
deploy-faucet-contract} } + { name: "fund_faucet_contract" fn: { subnet fund-faucet-contract (5e27)} } + { name: "faucet_set_drip_amount" fn: { subnet faucet-set-drip-amount 5e18} } + + ...$set_up_contract_owner_steps + { name: "deploy_blob_manager_contract" fn: { subnet deploy-subnet-contract ($env.state | get $env.state.subnet_contract_owner_ref) "BlobManager"} } + { name: "deploy_credit_contract" fn: { subnet deploy-subnet-contract ($env.state | get $env.state.subnet_contract_owner_ref) "CreditManager"} } + { name: "deploy_bucket_manager_contract" fn: { subnet deploy-subnet-contract ($env.state | get $env.state.subnet_contract_owner_ref) "BucketManager"} } + { name: "validator_check_funds_on_subnet" fn: { util wait-for-funds-on-subnet $env.state.validator0.address} } + ] +} + +export def prepare-validator [name: string, max_power: float] { + if $max_power < 1e18 { + print "ERROR: Max power is too low, it must be provided in wei units" + exit 1 + } + [ + { name: "wait_for_subnet" fn: { subnet wait-for-subnet} } + { name: $"($name)_get_funds_on_parent_chain" fn: { parent-chain send-funds ($env.state | get $name) 2e18} } + { name: $"($name)_mint_erc20" fn: { parent-chain mint-erc20 ($env.state | get $name | get address) ($max_power * 2) } } + { name: $"($name)_approve_validator_power" fn: { parent-chain approve-validator-power ($env.state | get $name) 1e18 $max_power} } + ] +} + +export def join-subnet [node_name: string, power: int] { + if $power >= 1e18 { + print "ERROR: Power is too high. It must be provided in full RECALL units." 
+ exit 1 + } + [ + { name: $"($node_name)_approve_subnet_contract" fn: { parent-chain approve-subnet-contract ($env.state | get $node_name) (2 * $power * 1e18) }} + { name: $"($node_name)_join_subnet" fn: { parent-chain join-subnet ($env.state | get $node_name) $power }} + ] +} diff --git a/deployment/lib/subnet.nu b/deployment/lib/subnet.nu new file mode 100644 index 000000000..8f09e6a18 --- /dev/null +++ b/deployment/lib/subnet.nu @@ -0,0 +1,105 @@ +use util.nu * + +export def wait-for-subnet [] { + while true { + let result = (run-in-container cast chain-id '-r' $env.state.config.subnet.rpc_url | complete) + if $result.exit_code == 0 { + let chain_id = ($result.stdout | into int) + if $chain_id == $env.state.config.subnet.chain_id { + break + } else { + print $"Chain ID was ($chain_id) but expected ($env.state.config.subnet.chain_id)" + exit 1 + } + } else { + print "waiting for subnet..." + sleep 2sec + } + } +} + +export def deploy-faucet-contract [] { + let contract_name = "Faucet" + forge-retry "deploy-faucet-contract" [ + script $"script/($contract_name).s.sol" + --private-key $env.state.faucet_owner.private_key + --tc DeployScript + --sig "'run(uint256)'" (1e18) + --rpc-url $env.state.config.subnet.rpc_url + --broadcast -vv + -g 100000 + ] + + let addr = (open $"($env.state.config.ipc_src_dir)/recall-contracts/broadcast/($contract_name).s.sol/($env.state.config.subnet.chain_id)/run-latest.json" | + get returns | values | where internal_type == $"contract ($contract_name)" | get 0.value) + do $env.state.update ({} | insert $"($contract_name | str downcase)_contract_address" $addr) +} + + +export def fund-faucet-contract [amount] { + cast-retry "fund-faucet-contract" [ + send --private-key $env.state.faucet_owner.private_key + --rpc-url $env.state.config.subnet.rpc_url + --timeout 120 + --confirmations 10 + $env.state.Faucet_contract_address + "'fund()'" + --value $amount + ] +} + +export def faucet-set-drip-amount [drip_amount: float] { + cast-retry 
"faucet-set-drip-amount" [ + send --private-key $env.state.faucet_owner.private_key + --rpc-url $env.state.config.subnet.rpc_url + --timeout 120 + --confirmations 10 + $env.state.Faucet_contract_address + "'setDripAmount(uint256)'" $drip_amount + ] +} + +export def deploy-subnet-contract [owner: record, contract_name] { + forge-retry $contract_name [ + script $"script/($contract_name).s.sol" + --private-key $owner.private_key + --tc DeployScript + --sig "'run()'" + --rpc-url $env.state.config.subnet.rpc_url + --broadcast -vv + --timeout 30 + -g 100000 + ] + + let addr = (open $"($env.state.config.ipc_src_dir)/recall-contracts/broadcast/($contract_name).s.sol/($env.state.config.subnet.chain_id)/run-latest.json" | get returns | values | where internal_type == $"contract ($contract_name)" | get 0.value) + do $env.state.update ({} | insert $"($contract_name | str downcase)_contract_address" $addr) +} + +export def send-funds [src: record, dest:record, amount: float] { + cast-retry "send-funds" [ + send --private-key $src.private_key + --rpc-url $env.state.config.subnet.rpc_url + --value $amount + $dest.address + ] +} + +# WARNING: this command invokes `recall` CLI on your PATH!!! +export def set-network-admin [] { + let cfg = ($env.state.config.workdir | path join "networks.toml") + recall -c $cfg -n $env.state.config.network subnet config set-admin --private-key $env.state.validator0.private_key $env.state.network_admin.address +} + +# WARNING: this command invokes `recall` CLI on your PATH!!! 
+export def set-network-config [] {
+    let cfg = ($env.state.config.workdir | path join "networks.toml")
+    recall -c $cfg -n $env.state.config.network subnet config set --private-key $env.state.network_admin.private_key ...[
+        --blob-capacity (10 * 2 ** 40)
+        --token-credit-rate (1e36)
+        --blob-credit-debit-interval 600
+        --blob-min-ttl 3600
+        --blob-default-ttl 1209600
+        --blob-delete-batch-size 100
+        --account-debit-batch-size 1000
+    ]
+}
diff --git a/deployment/lib/util.nu b/deployment/lib/util.nu
new file mode 100644
index 000000000..7e8a9a9b6
--- /dev/null
+++ b/deployment/lib/util.nu
@@ -0,0 +1,153 @@
+
+export def log [str: string, --color: string = "yellow"] {
+    print $"(ansi cyan)== [create-subnet] (ansi $color)($str)(ansi reset)"
+}
+
+# Run command in setup docker image
+export def run-in-container [...args, --denv: record] {
+    let cfg = $env.state.config
+    do {docker rm -f subnet-setup-call} e>| save -a /dev/null
+    docker run ...[
+        --rm --name "subnet-setup-call" -i
+        -v $"($cfg.ipc_config_dir):/fendermint/.ipc"
+        -v $"($cfg.ipc_src_dir):/fendermint/ipc"
+        ...(if ("docker_network" in $cfg) { [--network $cfg.docker_network] } else {[]})
+        ...($denv | default {} | items {|k,v| ['-e' $"($k)=($v)"]} | flatten)
+
+        # Run as a current user to avoid git's dubious ownership error
+        -u $"(id -u):(id -g)"
+
+        # forge clean tries to write HOME (/fendermint) that is owned by root.
+ -e "HOME=/tmp/builder" + -e "IPC_CLI_CONFIG_PATH=/fendermint/.ipc/config.toml" + + $cfg.setup_image + -c ($args | str join ' ') + ] +} + +export def balance-on-subnet [addr: string] { + run-in-container cast balance '--rpc-url' $env.state.config.subnet.rpc_url $addr | into float +} + +export def create-wallet [name] { + let key = (run-in-container cast wallet new '--json' | from json | get 0) + run-in-container 'ipc-cli wallet import --wallet-type evm --private-key' $key.private_key + log $"Created address: ($key.address)" + do $env.state.update ({} | insert $name $key) +} + +export def set-validator-address [name, address] { + do $env.state.update ({} | insert $name {address: $address}) +} + +export def f4-to-eth [addr] { + run-in-container ipc-cli util f4-to-eth-addr '--addr' $addr | str replace -r '.*address: ' '' | str trim +} + +export def wait-for-funds-on-subnet [address, required_amount: float = 1.] { + loop { + let balance = (balance-on-subnet $address) + log $"subnet balance of ($address): ($balance / 1e18) at (date now | format date "%Y-%m-%dT%H:%M:%S")" + if $balance >= $required_amount { + break + } else { + sleep 15sec + } + } +} + +export def confirm [text: string] { + let answer = (input $"Type 'yes' when ($text): ") + if $answer != "yes" { + log "Aborting..." + exit 1 + } +} + +def is-retriable-error [err: string] { + const retriables = [ + "minimum expected nonce is", + "server returned an error response: error code 2: expected sequence", + ] + for r in $retriables { + if ($err | str contains $r) { + return true + } + } + false +} + +# Retry call if error contains "minimum expected nonce" +export def cast-retry [name: string, cast_args] { + mut run = true + while $run { + log $"Calling ($name)..." + let result = (run-in-container cast ...$cast_args | tee {print} | tee -e {print} | complete) + if $result.exit_code == 0 { + $run = false + } else if (is-retriable-error $result.stderr) { + log "retrying in 5sec..." 
+ sleep 5sec + } else { + exit $result.exit_code + } + } +} + +# Retry call if error contains "minimum expected nonce" +export def forge-retry [name: string, forge_args] { + let cd = $"cd ($env.state.config.docker_ipc_src_dir)/recall-contracts;" + run-in-container ...[ + $cd + 'forge clean;' + 'forge install;' + ] + + mut run = true + while $run { + log $"Deploying ($name)..." + let result = (run-in-container $cd forge ...$forge_args | tee {print} | tee -e {print} | complete) + if $result.exit_code == 0 { + $run = false + } else if (is-retriable-error $result.stderr) { + log "retrying in 5sec..." + sleep 5sec + } else { + exit $result.exit_code + } + } +} + +export def get-base-config [ + workdir: string, + network: string, # one of "localnet", "testnet" + fendermint_image: string, +] { + let local_commit = git rev-parse --short=7 HEAD + if ($fendermint_image | str contains "sha-") { + let fendermint_commit = $fendermint_image | str replace -r ".*sha-" "" + if $local_commit != $fendermint_commit { + if ($env.SKIP_COMMIT_MATCH_CHECK? | is-empty) { + print $"ERROR: local commit ($local_commit) does not match fendermint image ($fendermint_image)" + exit 1 + } + } + } + + let wd = $workdir | path expand + let ic = $wd | path join "ipc-config" + const ipc_dir = path self ../.. 
+ { + workdir: $wd + ipc_config_dir: $ic + ipc_config_file: ($ic | path join "config.toml") + ipc_src_dir: $ipc_dir + docker_ipc_src_dir: "/fendermint/ipc" + fendermint_image: $fendermint_image + setup_image: $"subnet-setup:($local_commit)" + network: $network + } +} + +export def state-file [workdir: string] { $workdir | path join "state.yml" } diff --git a/deployment/localnet.nu b/deployment/localnet.nu new file mode 100755 index 000000000..f017cc149 --- /dev/null +++ b/deployment/localnet.nu @@ -0,0 +1,194 @@ +#!/usr/bin/env nu + +use lib/localnet.nu +use lib/state-engine.nu +use lib/local-files.nu +use lib/steps.nu +use lib/parent-chain.nu +use lib/util.nu +use lib/subnet.nu + +const anvil0_pk = "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80" + +# See subcommands +def main [] {} + +# Run all localnet services on the local docker. +def "main run" [ + --fendermint-image: string = "fendermint", + --workdir: string = "./localnet-data", + --node-count: int = 2, # how many nodes to run + --dc-repo: string = "https://github.com/recallnet/recall-docker-compose.git", # recall-docker-compose repo to clone + --dc-branch: string = "main", # recall-docker-compose branch + --rebuild-fendermint-image, # rebuild local fendermint image if --fendermint-image=fendermint, no effect otherwise + --reset, # delete previous data + ] { + + let workdir = $workdir | path expand + + if $reset { + reset $workdir + } + + let all_node_indexes = (0..($node_count - 1) | each {$in}) + let additional_node_indexes = ($all_node_indexes | skip 1) + + let build_fendermint_image = (if $rebuild_fendermint_image and $fendermint_image == "fendermint" {[ + { name: "build_fendermint_image" fn: { local-files build-fendermint-image } } + ]} else []) + let bootstrap_additional_nodes = ($additional_node_indexes | each { |ix| [ + { name: $"localnet_node($ix)_create_wallet" fn: { util create-wallet $"validator($ix)"} } + ...(steps prepare-validator $"validator($ix)" 2e18) + { name: 
$"node($ix)_get_funds_on_subnet" fn: { subnet send-funds $env.state.validator0 ($env.state | get $"validator($ix)") 5e18} } + + { name: $"localnet_run_node($ix)" fn: {localnet run-localnet-node $ix $dc_repo $dc_branch --bootstrap}} + { name: $"localnet_node($ix)_wait_for_cometbft" fn: { localnet wait-for-cometbft $ix }} + + { name: $"localnet_node($ix)_wait_for_sync" fn: { localnet wait-for-sync $ix }} + ...(steps join-subnet $"validator($ix)" 2) + ]} | flatten) + + let run_full_nodes = ($all_node_indexes | each { |ix| [ + { name: $"localnet_run_node($ix)_full" fn: {localnet run-localnet-node $ix $dc_repo $dc_branch}} + ]} | flatten) + + let get_funds_step = {parent-chain send-funds $env.state.supply_source_owner 100e18 --from-private-key $anvil0_pk} + let set_up_contract_owner_steps = [ + { name: "subnet_contract_create_wallet" fn: { util create-wallet "subnet_contract_owner"} } + { name: "subnet_contract_get_funds_on_subnet" fn: { subnet send-funds $env.state.faucet_owner $env.state.subnet_contract_owner 100e18} } + { name: "set_subnet_contract_owner_ref" fn: { do $env.state.update { subnet_contract_owner_ref: "subnet_contract_owner" } } } + ] + + let steps = [ + { name: "localnet_init" fn: { localnet init-state $workdir $fendermint_image}} + { name: "update_submodules" fn: { git submodule update --init --recursive }} + ...$build_fendermint_image + { name: "localnet_start_anvil" fn: {localnet run-anvil $workdir}} + ...(steps get-create-subnet-steps $get_funds_step) + { name: "localnet_run_node0_bootstrap" fn: {localnet run-localnet-node 0 $dc_repo $dc_branch --bootstrap}} + { name: "localnet_node0_wait_for_cometbft" fn: { localnet wait-for-cometbft 0 }} + + ...$bootstrap_additional_nodes + ...(steps get-deploy-subnet-contracts-steps $set_up_contract_owner_steps) + + ...$run_full_nodes + ] + + mkdir $workdir + let state_file = util state-file $workdir + if (state-engine read-state $state_file | get -i graceful_shutdown | default false) { + localnet run-anvil 
$workdir
+        glob ($workdir + "/node-*") | each {|dir|
+            cd ($dir | path join "workdir")
+            docker compose up -d
+        }
+        state-engine update-state $state_file { graceful_shutdown: false }
+    } else {
+        state-engine run $state_file $steps --log-prefix "localnet"
+    }
+    print-recall-envvars $workdir
+}
+
+# Run the entire localnet in a single container based on textile/recall-localnet.
+def "main run-dind" [
+    --tag: string = "latest", # tag for textile/recall-localnet
+    --workdir: string = "./localnet-data", # where to store networks.toml and state.yml
+    ] {
+
+    docker run ...[
+        --rm -d --name localnet
+        -p 127.0.0.1:8545:8545
+        -p 127.0.0.1:8645:8645
+        -p 127.0.0.1:8001:8001
+        -p 127.0.0.1:26657:26657
+        --privileged
+        $"textile/recall-localnet:($tag)"
+    ]
+    print "Container localnet is running."
+
+    mkdir $workdir
+    docker cp localnet:/workdir/localnet-data/networks.toml ($workdir + "/networks.toml")
+    docker cp localnet:/workdir/localnet-data/state.yml ($workdir + "/state.yml")
+    print-recall-envvars $workdir
+}
+
+def print-recall-envvars [workdir: string] {
+    print "\nRun the following lines to use with recall CLI:"
+    print "export RECALL_NETWORK=localnet"
+    print $"export RECALL_NETWORK_CONFIG_FILE=($workdir + "/networks.toml")"
+}
+
+# Build a docker image containing all localnet services inside.
+def "main build-docker-image" [
+    --workdir: string = "./localnet-data",
+    --fendermint-image: string = "fendermint",
+    --rebuild-fendermint-image, # rebuild local fendermint image if --fendermint-image=fendermint, no effect otherwise
+    --node-count: int = 2, # how many nodes to run
+    --dc-repo: string = "https://github.com/recallnet/recall-docker-compose.git", # recall-docker-compose repo to clone
+    --dc-branch: string = "main", # recall-docker-compose branch
+    --local-image-tag: string, # build a local image with the given tag
+    --push-multi-arch-tags: string, # a comma separated list of tags (e.g.
textile/recall-localnet:sha-1234567) to push to remote registry
+    --reset, # delete previous data
+    ] {
+    if $reset { reset $workdir }
+
+    let state = state-engine read-state (util state-file $workdir)
+
+    if not ($state.graceful_shutdown? | default false) {
+        (main run
+            --fendermint-image $fendermint_image
+            --workdir $workdir
+            --node-count $node_count
+            --dc-repo $dc_repo
+            --dc-branch $dc_branch
+            --rebuild-fendermint-image=$rebuild_fendermint_image
+        )
+        localnet stop-network $workdir
+    }
+
+    localnet build-dind-image $local_image_tag $push_multi_arch_tags
+}
+
+# Stop all localnet containers and delete the data directory.
+def reset [workdir: string] {
+    print "resetting..."
+    main stop --force
+    rm -rf $workdir
+}
+
+# Stop all localnet containers.
+def "main stop" [
+    --workdir: string = "./localnet-data",
+    --force, # Force the removal of running containers
+    ] {
+    localnet stop-network $workdir --force=$force
+}
+
+# Kill all containers of the node.
+def "main kill-node" [
+    ix: int, # Index of the node to kill
+    ] {
+    docker ps --format json | lines | each {from json} | where Names =~ $"localnet-node-($ix)" | each {docker rm -f $in.ID}
+}
+
+# Reset a single localnet node with a given index.
+def "main reset-node" [
+    ix: int, # Index of the node to reset
+    ] {
+    main kill-node $ix
+    cd $"localnet-data/node-($ix)"
+    rm -r workdir
+    ./init-workdir
+    cd ./workdir
+    docker compose up -d
+}
+
+# Get funds on subnet.
+def "main get-funds" [ + address: string, + --amount: float = 5e18, # in wei units + --workdir: string = "./localnet-data", + ] { + let state = util state-file $workdir | open + cast send --private-key $state.faucet_owner.private_key -r http://localhost:8645 --value $amount $address +} diff --git a/deployment/set-up-nu.sh b/deployment/set-up-nu.sh new file mode 100755 index 000000000..9f9e3adde --- /dev/null +++ b/deployment/set-up-nu.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# This file downloads the required nushell version to ./.nu folder. + +set -e + +version="0.103.0" + +echo "Installing Nushell..." +nu_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/.nu" + +mkdir -p $nu_dir +cd $nu_dir +os=$(uname | sed -e s/darwin/apple-darwin/i -e s/linux/unknown-linux-gnu/i) +arch=$(uname -m | sed -e s/arm64/aarch64/) +curl -Lo nu.tgz https://github.com/nushell/nushell/releases/download/${version}/nu-${version}-${arch}-${os}.tar.gz +tar xf nu.tgz +mv nu-*/nu . +rm -rf nu-* nu.tgz +echo "export PATH=$(pwd):\$PATH" > $nu_dir/activate.sh +source ./activate.sh + +echo "Nushell installed at $(which nu)" +nu -c version +echo "Activate: source $nu_dir/activate.sh"