diff --git a/src/hydra-queue-runner-v2/.cargo/config.toml b/src/hydra-queue-runner-v2/.cargo/config.toml
new file mode 100644
index 000000000..bff29e6e1
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.cargo/config.toml
@@ -0,0 +1,2 @@
+[build]
+rustflags = ["--cfg", "tokio_unstable"]
diff --git a/src/hydra-queue-runner-v2/.github/workflows/rust.yml b/src/hydra-queue-runner-v2/.github/workflows/rust.yml
new file mode 100644
index 000000000..46c0e5b5a
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.github/workflows/rust.yml
@@ -0,0 +1,69 @@
+on: [push, pull_request]
+
+name: rust
+
+env:
+  CARGO_TERM_COLOR: always
+
+jobs:
+  lint:
+    name: Lint
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - name: Install stable toolchain
+        uses: dtolnay/rust-toolchain@stable
+        with:
+          components: clippy
+
+      - name: Install dependencies
+        run: sudo apt update && sudo apt install -y pkg-config protobuf-compiler
+
+      - run: cargo check --all --tests --all-features
+      - run: cargo clippy --all --tests --all-features -- -D warnings
+
+  fmt:
+    name: Format Check
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - name: Install stable toolchain
+        uses: dtolnay/rust-toolchain@stable
+        with:
+          components: rustfmt
+
+      - run: cargo fmt --all -- --check
+
+  build:
+    name: Build
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - name: Install stable toolchain
+        uses: dtolnay/rust-toolchain@stable
+
+      - name: Install dependencies
+        run: sudo apt update && sudo apt install -y pkg-config protobuf-compiler
+
+      - run: cargo build --all --tests --all-features --examples
+
+  test:
+    name: Test
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout sources
+        uses: actions/checkout@v4
+
+      - name: Install stable toolchain
+        uses: dtolnay/rust-toolchain@stable
+
+      - name: Install dependencies
+        run: sudo apt update && sudo apt install -y pkg-config protobuf-compiler
+
+      - run: cargo test
diff --git a/src/hydra-queue-runner-v2/.gitignore b/src/hydra-queue-runner-v2/.gitignore
new file mode 100644
index 000000000..199e542b1
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.gitignore
@@ -0,0 +1,3 @@
+/.direnv
+/target
+/.env
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-049c2b15e5806241473754264be493889dfa874d73dfe257a2b8554b6543acaf.json b/src/hydra-queue-runner-v2/.sqlx/query-049c2b15e5806241473754264be493889dfa874d73dfe257a2b8554b6543acaf.json
new file mode 100644
index 000000000..0553d8b7e
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-049c2b15e5806241473754264be493889dfa874d73dfe257a2b8554b6543acaf.json
@@ -0,0 +1,23 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT drvPath FROM BuildSteps WHERE build = $1 AND stepnr = $2",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "drvpath",
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4"
+      ]
+    },
+    "nullable": [
+      true
+    ]
+  },
+  "hash": "049c2b15e5806241473754264be493889dfa874d73dfe257a2b8554b6543acaf"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-05e4170e17728e552240a2474d97bdd750d7adc15ea27c0d7b10ba5330ca4f9e.json b/src/hydra-queue-runner-v2/.sqlx/query-05e4170e17728e552240a2474d97bdd750d7adc15ea27c0d7b10ba5330ca4f9e.json
new file mode 100644
index 000000000..c9e4406c9
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-05e4170e17728e552240a2474d97bdd750d7adc15ea27c0d7b10ba5330ca4f9e.json
@@ -0,0 +1,80 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n builds.id,\n builds.jobset_id,\n jobsets.project as project,\n jobsets.name as jobset,\n job,\n drvPath,\n maxsilent,\n timeout,\n timestamp,\n globalPriority,\n priority\n FROM builds\n INNER JOIN jobsets ON builds.jobset_id = jobsets.id\n WHERE finished = 0 ORDER BY globalPriority desc, schedulingshares, random();",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "id",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 1,
+        "name": "jobset_id",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 2,
+        "name": "project",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 3,
+        "name": "jobset",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 4,
+        "name": "job",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 5,
+        "name": "drvpath",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 6,
+        "name": "maxsilent",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 7,
+        "name": "timeout",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 8,
+        "name": "timestamp",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 9,
+        "name": "globalpriority",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 10,
+        "name": "priority",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": []
+    },
+    "nullable": [
+      false,
+      false,
+      false,
+      false,
+      false,
+      false,
+      true,
+      true,
+      false,
+      false,
+      false
+    ]
+  },
+  "hash": "05e4170e17728e552240a2474d97bdd750d7adc15ea27c0d7b10ba5330ca4f9e"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-0626225fd580d1b49375835b17e129af1b84c9db615bdd224c5a36ed60344414.json b/src/hydra-queue-runner-v2/.sqlx/query-0626225fd580d1b49375835b17e129af1b84c9db615bdd224c5a36ed60344414.json
new file mode 100644
index 000000000..c7d7bea25
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-0626225fd580d1b49375835b17e129af1b84c9db615bdd224c5a36ed60344414.json
@@ -0,0 +1,18 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n UPDATE builds SET\n finished = 1,\n buildStatus = $2,\n startTime = $3,\n stopTime = $4,\n isCachedBuild = $5,\n notificationPendingSince = $4\n WHERE\n id = $1 AND finished = 0",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Int4",
+        "Int4",
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "0626225fd580d1b49375835b17e129af1b84c9db615bdd224c5a36ed60344414"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-0b0f985664863d2e7883f7460fc451de6f773b2fd0348a471e8e03571dc80492.json b/src/hydra-queue-runner-v2/.sqlx/query-0b0f985664863d2e7883f7460fc451de6f773b2fd0348a471e8e03571dc80492.json
new file mode 100644
index 000000000..0d11ec905
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-0b0f985664863d2e7883f7460fc451de6f773b2fd0348a471e8e03571dc80492.json
@@ -0,0 +1,32 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n project,\n name,\n schedulingshares\n FROM jobsets",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "project",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 1,
+        "name": "name",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 2,
+        "name": "schedulingshares",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": []
+    },
+    "nullable": [
+      false,
+      false,
+      false
+    ]
+  },
+  "hash": "0b0f985664863d2e7883f7460fc451de6f773b2fd0348a471e8e03571dc80492"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-0f15524bd358e02a14c66f9058c866e937d5a124040f77ac62c20371c7421ff5.json b/src/hydra-queue-runner-v2/.sqlx/query-0f15524bd358e02a14c66f9058c866e937d5a124040f77ac62c20371c7421ff5.json
new file mode 100644
index 000000000..d26b58ab2
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-0f15524bd358e02a14c66f9058c866e937d5a124040f77ac62c20371c7421ff5.json
@@ -0,0 +1,21 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n UPDATE builds SET\n finished = 1,\n buildStatus = $2,\n startTime = $3,\n stopTime = $4,\n size = $5,\n closureSize = $6,\n releaseName = $7,\n isCachedBuild = $8,\n notificationPendingSince = $4\n WHERE\n id = $1",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Int4",
+        "Int4",
+        "Int8",
+        "Int8",
+        "Text",
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "0f15524bd358e02a14c66f9058c866e937d5a124040f77ac62c20371c7421ff5"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-1a5a0676ca03a1c4659f05b45e1a32719dac5fa8bbb879afd3c0e1e106178d59.json b/src/hydra-queue-runner-v2/.sqlx/query-1a5a0676ca03a1c4659f05b45e1a32719dac5fa8bbb879afd3c0e1e106178d59.json
new file mode 100644
index 000000000..61555ed82
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-1a5a0676ca03a1c4659f05b45e1a32719dac5fa8bbb879afd3c0e1e106178d59.json
@@ -0,0 +1,23 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT MAX(s.build) FROM buildsteps s\n JOIN BuildStepOutputs o ON s.build = o.build\n WHERE startTime != 0\n AND stopTime != 0\n AND status = 1\n AND drvPath = $1\n AND name = $2\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "max",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Text",
+        "Text"
+      ]
+    },
+    "nullable": [
+      null
+    ]
+  },
+  "hash": "1a5a0676ca03a1c4659f05b45e1a32719dac5fa8bbb879afd3c0e1e106178d59"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-2a6f7b562d13337e240588d25734f9ea9c9601b2d010aa93038872c1538d0816.json b/src/hydra-queue-runner-v2/.sqlx/query-2a6f7b562d13337e240588d25734f9ea9c9601b2d010aa93038872c1538d0816.json
new file mode 100644
index 000000000..00c433582
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-2a6f7b562d13337e240588d25734f9ea9c9601b2d010aa93038872c1538d0816.json
@@ -0,0 +1,16 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "UPDATE builds SET finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "2a6f7b562d13337e240588d25734f9ea9c9601b2d010aa93038872c1538d0816"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-455497955081a6040cc5e95c6d96dae26d2400d878eb0eeb01d39487ff362480.json b/src/hydra-queue-runner-v2/.sqlx/query-455497955081a6040cc5e95c6d96dae26d2400d878eb0eeb01d39487ff362480.json
new file mode 100644
index 000000000..6765e5e49
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-455497955081a6040cc5e95c6d96dae26d2400d878eb0eeb01d39487ff362480.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT id FROM builds WHERE id = $1 AND finished = 0",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "id",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int4"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "455497955081a6040cc5e95c6d96dae26d2400d878eb0eeb01d39487ff362480"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-60433164aae6d537f4b9056a5f9cf46af129639d2cc4cdc3262293846c088906.json b/src/hydra-queue-runner-v2/.sqlx/query-60433164aae6d537f4b9056a5f9cf46af129639d2cc4cdc3262293846c088906.json
new file mode 100644
index 000000000..53fa792e0
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-60433164aae6d537f4b9056a5f9cf46af129639d2cc4cdc3262293846c088906.json
@@ -0,0 +1,15 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "UPDATE buildsteps SET busy = 0, status = $1, stopTime = $2 WHERE busy != 0;",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "60433164aae6d537f4b9056a5f9cf46af129639d2cc4cdc3262293846c088906"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-68ff1a22bdbfe552106035ff2f7fd349df6c1da958374a4842124581c7654d5c.json b/src/hydra-queue-runner-v2/.sqlx/query-68ff1a22bdbfe552106035ff2f7fd349df6c1da958374a4842124581c7654d5c.json
new file mode 100644
index 000000000..06c4a46c8
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-68ff1a22bdbfe552106035ff2f7fd349df6c1da958374a4842124581c7654d5c.json
@@ -0,0 +1,16 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "UPDATE buildoutputs SET path = $3 WHERE build = $1 AND name = $2",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Text",
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "68ff1a22bdbfe552106035ff2f7fd349df6c1da958374a4842124581c7654d5c"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-694cba4e8063ef370358e1c5828b9450c3c06b8449eeee76f2a3a5131a6e3006.json b/src/hydra-queue-runner-v2/.sqlx/query-694cba4e8063ef370358e1c5828b9450c3c06b8449eeee76f2a3a5131a6e3006.json
new file mode 100644
index 000000000..1f2bcfa96
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-694cba4e8063ef370358e1c5828b9450c3c06b8449eeee76f2a3a5131a6e3006.json
@@ -0,0 +1,14 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "INSERT INTO systemstatus (\n what, status\n ) VALUES (\n 'queue-runner', $1\n ) ON CONFLICT (what) DO UPDATE SET status = EXCLUDED.status;",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Json"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "694cba4e8063ef370358e1c5828b9450c3c06b8449eeee76f2a3a5131a6e3006"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-6a79a1188d2334feb26b255c1d57430873aa1b192cacdc98ebf7a8e48665d7a4.json b/src/hydra-queue-runner-v2/.sqlx/query-6a79a1188d2334feb26b255c1d57430873aa1b192cacdc98ebf7a8e48665d7a4.json
new file mode 100644
index 000000000..06ea27bc4
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-6a79a1188d2334feb26b255c1d57430873aa1b192cacdc98ebf7a8e48665d7a4.json
@@ -0,0 +1,23 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n UPDATE buildsteps SET\n busy = 0,\n status = $1,\n errorMsg = $4,\n startTime = $5,\n stopTime = $6,\n machine = $7,\n overhead = $8,\n timesBuilt = $9,\n isNonDeterministic = $10\n WHERE\n build = $2 AND stepnr = $3\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Int4",
+        "Text",
+        "Int4",
+        "Int4",
+        "Text",
+        "Int4",
+        "Int4",
+        "Bool"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "6a79a1188d2334feb26b255c1d57430873aa1b192cacdc98ebf7a8e48665d7a4"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-7b0ede437aa972b2588415a5bfe8ff545285d457dc809601c9c780c955e16ff5.json b/src/hydra-queue-runner-v2/.sqlx/query-7b0ede437aa972b2588415a5bfe8ff545285d457dc809601c9c780c955e16ff5.json
new file mode 100644
index 000000000..a04662e97
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-7b0ede437aa972b2588415a5bfe8ff545285d457dc809601c9c780c955e16ff5.json
@@ -0,0 +1,58 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n type,\n subtype,\n fileSize,\n sha256hash,\n path,\n name,\n defaultPath\n FROM buildproducts\n WHERE build = $1 ORDER BY productnr;",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "type",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 1,
+        "name": "subtype",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 2,
+        "name": "filesize",
+        "type_info": "Int8"
+      },
+      {
+        "ordinal": 3,
+        "name": "sha256hash",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 4,
+        "name": "path",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 5,
+        "name": "name",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 6,
+        "name": "defaultpath",
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int4"
+      ]
+    },
+    "nullable": [
+      false,
+      false,
+      true,
+      true,
+      true,
+      false,
+      true
+    ]
+  },
+  "hash": "7b0ede437aa972b2588415a5bfe8ff545285d457dc809601c9c780c955e16ff5"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-83b10946d5b6a044491b73af2c8add459c04deb6b69e755840ff43e859fcd7a9.json b/src/hydra-queue-runner-v2/.sqlx/query-83b10946d5b6a044491b73af2c8add459c04deb6b69e755840ff43e859fcd7a9.json
new file mode 100644
index 000000000..ed37db39f
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-83b10946d5b6a044491b73af2c8add459c04deb6b69e755840ff43e859fcd7a9.json
@@ -0,0 +1,34 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n name, unit, value\n FROM buildmetrics\n WHERE build = $1;",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "name",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 1,
+        "name": "unit",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 2,
+        "name": "value",
+        "type_info": "Float8"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int4"
+      ]
+    },
+    "nullable": [
+      false,
+      true,
+      false
+    ]
+  },
+  "hash": "83b10946d5b6a044491b73af2c8add459c04deb6b69e755840ff43e859fcd7a9"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-84173580db29e288f8e83dbae7ab5636bdcab6b0243168dde7cb7f81d7129fac.json b/src/hydra-queue-runner-v2/.sqlx/query-84173580db29e288f8e83dbae7ab5636bdcab6b0243168dde7cb7f81d7129fac.json
new file mode 100644
index 000000000..1803bad92
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-84173580db29e288f8e83dbae7ab5636bdcab6b0243168dde7cb7f81d7129fac.json
@@ -0,0 +1,16 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "INSERT INTO builds (\n finished,\n timestamp,\n jobset_id,\n job,\n nixname,\n drvpath,\n system,\n maxsilent,\n timeout,\n ischannel,\n iscurrent,\n priority,\n globalpriority,\n keep\n ) VALUES (\n 0,\n EXTRACT(EPOCH FROM NOW())::INT4,\n $1,\n 'debug',\n 'debug',\n $2,\n $3,\n 7200,\n 36000,\n 0,\n 0,\n 100,\n 0,\n 0);",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Text",
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "84173580db29e288f8e83dbae7ab5636bdcab6b0243168dde7cb7f81d7129fac"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-877e109257b264b87d8f0b90e91dcd7a913bfb4d216dd190eacd63f5a5f5cd45.json b/src/hydra-queue-runner-v2/.sqlx/query-877e109257b264b87d8f0b90e91dcd7a913bfb4d216dd190eacd63f5a5f5cd45.json
new file mode 100644
index 000000000..c14f22e80
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-877e109257b264b87d8f0b90e91dcd7a913bfb4d216dd190eacd63f5a5f5cd45.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT MAX(build) FROM buildsteps WHERE drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "max",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Text"
+      ]
+    },
+    "nullable": [
+      null
+    ]
+  },
+  "hash": "877e109257b264b87d8f0b90e91dcd7a913bfb4d216dd190eacd63f5a5f5cd45"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-8d6f5c4a866eca834ec76d1b7068f4091f65139067f38b16b047ba2561c0cfee.json b/src/hydra-queue-runner-v2/.sqlx/query-8d6f5c4a866eca834ec76d1b7068f4091f65139067f38b16b047ba2561c0cfee.json
new file mode 100644
index 000000000..2789d301e
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-8d6f5c4a866eca834ec76d1b7068f4091f65139067f38b16b047ba2561c0cfee.json
@@ -0,0 +1,16 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n UPDATE builds SET\n finished = 1,\n buildStatus = $2,\n startTime = $3,\n stopTime = $3,\n isCachedBuild = 1,\n notificationPendingSince = $3\n WHERE\n id = $1 AND finished = 0",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "8d6f5c4a866eca834ec76d1b7068f4091f65139067f38b16b047ba2561c0cfee"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-9ac00b06367900308f94877d24cc7db38d6e03e9208206195f52214648ddc2d2.json b/src/hydra-queue-runner-v2/.sqlx/query-9ac00b06367900308f94877d24cc7db38d6e03e9208206195f52214648ddc2d2.json
new file mode 100644
index 000000000..f597331b7
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-9ac00b06367900308f94877d24cc7db38d6e03e9208206195f52214648ddc2d2.json
@@ -0,0 +1,26 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n id,\n globalPriority\n FROM builds\n WHERE finished = 0;",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "id",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 1,
+        "name": "globalpriority",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": []
+    },
+    "nullable": [
+      false,
+      false
+    ]
+  },
+  "hash": "9ac00b06367900308f94877d24cc7db38d6e03e9208206195f52214648ddc2d2"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-a1a314a6add9d6b0f8e0930bf3422165a892887680443545bbfcb2b94925706a.json b/src/hydra-queue-runner-v2/.sqlx/query-a1a314a6add9d6b0f8e0930bf3422165a892887680443545bbfcb2b94925706a.json
new file mode 100644
index 000000000..850901218
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-a1a314a6add9d6b0f8e0930bf3422165a892887680443545bbfcb2b94925706a.json
@@ -0,0 +1,17 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "UPDATE buildstepoutputs SET path = $4 WHERE build = $1 AND stepnr = $2 AND name = $3",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Text",
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "a1a314a6add9d6b0f8e0930bf3422165a892887680443545bbfcb2b94925706a"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-a6cedfa152e75af251a255584fca2d67fdd2bf0bb258f4320da7c00bdf3c7680.json b/src/hydra-queue-runner-v2/.sqlx/query-a6cedfa152e75af251a255584fca2d67fdd2bf0bb258f4320da7c00bdf3c7680.json
new file mode 100644
index 000000000..38ee720f3
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-a6cedfa152e75af251a255584fca2d67fdd2bf0bb258f4320da7c00bdf3c7680.json
@@ -0,0 +1,14 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n INSERT INTO failedpaths (\n path\n ) VALUES (\n $1\n )\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "a6cedfa152e75af251a255584fca2d67fdd2bf0bb258f4320da7c00bdf3c7680"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-a75df630ab35feb02b610618f538a1b3f5d45e09ad11c2c7a6113a505895c792.json b/src/hydra-queue-runner-v2/.sqlx/query-a75df630ab35feb02b610618f538a1b3f5d45e09ad11c2c7a6113a505895c792.json
new file mode 100644
index 000000000..d5319a0ac
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-a75df630ab35feb02b610618f538a1b3f5d45e09ad11c2c7a6113a505895c792.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT MAX(stepnr) FROM buildsteps WHERE build = $1",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "max",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int4"
+      ]
+    },
+    "nullable": [
+      null
+    ]
+  },
+  "hash": "a75df630ab35feb02b610618f538a1b3f5d45e09ad11c2c7a6113a505895c792"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-a9d877539b53ae4a71066f39613b2d02ed7a72e8ca184d2935a07f498a8992a7.json b/src/hydra-queue-runner-v2/.sqlx/query-a9d877539b53ae4a71066f39613b2d02ed7a72e8ca184d2935a07f498a8992a7.json
new file mode 100644
index 000000000..dc22ca5be
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-a9d877539b53ae4a71066f39613b2d02ed7a72e8ca184d2935a07f498a8992a7.json
@@ -0,0 +1,16 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "UPDATE buildsteps SET busy = $1 WHERE build = $2 AND stepnr = $3 AND busy != 0 AND status IS NULL",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "a9d877539b53ae4a71066f39613b2d02ed7a72e8ca184d2935a07f498a8992a7"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-aba8c9ff906e7abdbc49f2a4fb4a15a05ea5fab14dfa676cfb6a5a9d84297438.json b/src/hydra-queue-runner-v2/.sqlx/query-aba8c9ff906e7abdbc49f2a4fb4a15a05ea5fab14dfa676cfb6a5a9d84297438.json
new file mode 100644
index 000000000..74ea6e8d8
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-aba8c9ff906e7abdbc49f2a4fb4a15a05ea5fab14dfa676cfb6a5a9d84297438.json
@@ -0,0 +1,29 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT s.startTime, s.stopTime FROM buildsteps s join builds b on build = id\n WHERE\n s.startTime IS NOT NULL AND\n to_timestamp(s.stopTime) > (NOW() - (interval '1 second' * $1)) AND\n jobset_id = $2\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "starttime",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 1,
+        "name": "stoptime",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Float8",
+        "Int4"
+      ]
+    },
+    "nullable": [
+      true,
+      true
+    ]
+  },
+  "hash": "aba8c9ff906e7abdbc49f2a4fb4a15a05ea5fab14dfa676cfb6a5a9d84297438"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-af02dbe27d340f0ee8ed4803d3a1ecf1fa7268064dba9130bbd833f1f484e2a7.json b/src/hydra-queue-runner-v2/.sqlx/query-af02dbe27d340f0ee8ed4803d3a1ecf1fa7268064dba9130bbd833f1f484e2a7.json
new file mode 100644
index 000000000..b7363a23b
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-af02dbe27d340f0ee8ed4803d3a1ecf1fa7268064dba9130bbd833f1f484e2a7.json
@@ -0,0 +1,20 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT status FROM systemstatus WHERE what = 'queue-runner';",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "status",
+        "type_info": "Json"
+      }
+    ],
+    "parameters": {
+      "Left": []
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "af02dbe27d340f0ee8ed4803d3a1ecf1fa7268064dba9130bbd833f1f484e2a7"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-b0f2aab8d3aae78a8c695d0a9433b19cb5ba6806d74c252593b76243e1c8c853.json b/src/hydra-queue-runner-v2/.sqlx/query-b0f2aab8d3aae78a8c695d0a9433b19cb5ba6806d74c252593b76243e1c8c853.json
new file mode 100644
index 000000000..b3a602296
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-b0f2aab8d3aae78a8c695d0a9433b19cb5ba6806d74c252593b76243e1c8c853.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT path FROM failedpaths where path = ANY($1)",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "path",
+        "type_info": "Text"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "TextArray"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "b0f2aab8d3aae78a8c695d0a9433b19cb5ba6806d74c252593b76243e1c8c853"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-b600c93d66b45b9859173e0e0f7f36b29a641f881e3e61f728b39ddf068dddde.json b/src/hydra-queue-runner-v2/.sqlx/query-b600c93d66b45b9859173e0e0f7f36b29a641f881e3e61f728b39ddf068dddde.json
new file mode 100644
index 000000000..0f6f3ba6e
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-b600c93d66b45b9859173e0e0f7f36b29a641f881e3e61f728b39ddf068dddde.json
@@ -0,0 +1,14 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "DELETE FROM buildproducts WHERE build = $1",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "b600c93d66b45b9859173e0e0f7f36b29a641f881e3e61f728b39ddf068dddde"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-d155253f103fb9c24374199bd58a827071e69bf33e427bfa040a83d07b909e31.json b/src/hydra-queue-runner-v2/.sqlx/query-d155253f103fb9c24374199bd58a827071e69bf33e427bfa040a83d07b909e31.json
new file mode 100644
index 000000000..03f62654b
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-d155253f103fb9c24374199bd58a827071e69bf33e427bfa040a83d07b909e31.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "SELECT schedulingshares FROM jobsets WHERE id = $1",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "schedulingshares",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Int4"
+      ]
+    },
+    "nullable": [
+      false
+    ]
+  },
+  "hash": "d155253f103fb9c24374199bd58a827071e69bf33e427bfa040a83d07b909e31"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-d1a70032949d4c470a20ba89589ce6f36c7fd576ed02d868f4185dd9f886a30b.json b/src/hydra-queue-runner-v2/.sqlx/query-d1a70032949d4c470a20ba89589ce6f36c7fd576ed02d868f4185dd9f886a30b.json
new file mode 100644
index 000000000..ab1332f4a
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-d1a70032949d4c470a20ba89589ce6f36c7fd576ed02d868f4185dd9f886a30b.json
@@ -0,0 +1,21 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n INSERT INTO buildmetrics (\n build,\n name,\n unit,\n value,\n project,\n jobset,\n job,\n timestamp\n ) VALUES (\n $1, $2, $3, $4, $5, $6, $7, $8\n )\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Text",
+        "Text",
+        "Float8",
+        "Text",
+        "Text",
+        "Text",
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "d1a70032949d4c470a20ba89589ce6f36c7fd576ed02d868f4185dd9f886a30b"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-e0d7d4bdb291d4d403aa37627d8b1cdc91d6c3515d2ae530fe14830950eecffb.json b/src/hydra-queue-runner-v2/.sqlx/query-e0d7d4bdb291d4d403aa37627d8b1cdc91d6c3515d2ae530fe14830950eecffb.json
new file mode 100644
index 000000000..9a73e57fd
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-e0d7d4bdb291d4d403aa37627d8b1cdc91d6c3515d2ae530fe14830950eecffb.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT MAX(s.build) FROM buildsteps s\n JOIN BuildStepOutputs o ON s.build = o.build\n WHERE startTime != 0\n AND stopTime != 0\n AND status = 1\n AND path = $1\n ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "max",
+        "type_info": "Int4"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Text"
+      ]
+    },
+    "nullable": [
+      null
+    ]
+  },
+  "hash": "e0d7d4bdb291d4d403aa37627d8b1cdc91d6c3515d2ae530fe14830950eecffb"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-e418dcd190f68642391dc8793fc30ad500dc5d513fce55a901e1d4a400323cc8.json b/src/hydra-queue-runner-v2/.sqlx/query-e418dcd190f68642391dc8793fc30ad500dc5d513fce55a901e1d4a400323cc8.json
new file mode 100644
index 000000000..c670f2083
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-e418dcd190f68642391dc8793fc30ad500dc5d513fce55a901e1d4a400323cc8.json
@@ -0,0 +1,46 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n SELECT\n id, buildStatus, releaseName, closureSize, size\n FROM builds b\n JOIN buildoutputs o on b.id = o.build\n WHERE finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1;",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "id",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 1,
+        "name": "buildstatus",
+        "type_info": "Int4"
+      },
+      {
+        "ordinal": 2,
+        "name": "releasename",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 3,
+        "name": "closuresize",
+        "type_info": "Int8"
+      },
+      {
+        "ordinal": 4,
+        "name": "size",
+        "type_info": "Int8"
+      }
+    ],
+    "parameters": {
+      "Left": [
+        "Text"
+      ]
+    },
+    "nullable": [
+      false,
+      true,
+      true,
+      true,
+      true
+    ]
+  },
+  "hash": "e418dcd190f68642391dc8793fc30ad500dc5d513fce55a901e1d4a400323cc8"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-ec762ac14489cf15630e71703f7f1baf0e4fcf0a5bf41293343c626ce1b64f1a.json b/src/hydra-queue-runner-v2/.sqlx/query-ec762ac14489cf15630e71703f7f1baf0e4fcf0a5bf41293343c626ce1b64f1a.json
new file mode 100644
index 000000000..9aeb1026e
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-ec762ac14489cf15630e71703f7f1baf0e4fcf0a5bf41293343c626ce1b64f1a.json
@@ -0,0 +1,14 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "DELETE FROM buildmetrics WHERE build = $1",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "ec762ac14489cf15630e71703f7f1baf0e4fcf0a5bf41293343c626ce1b64f1a"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-ed7c8532b3c113ceb008675379f500a950610c46d6edafa7d9dca960398d8e45.json b/src/hydra-queue-runner-v2/.sqlx/query-ed7c8532b3c113ceb008675379f500a950610c46d6edafa7d9dca960398d8e45.json
new file mode 100644
index 000000000..9eaae642e
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-ed7c8532b3c113ceb008675379f500a950610c46d6edafa7d9dca960398d8e45.json
@@ -0,0 +1,22 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n INSERT INTO buildproducts (\n build,\n productnr,\n type,\n subtype,\n fileSize,\n sha256hash,\n path,\n name,\n defaultPath\n ) VALUES (\n $1, $2, $3, $4, $5, $6, $7, $8, $9\n )\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Text",
+        "Text",
+        "Int8",
+        "Text",
+        "Text",
+        "Text",
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "ed7c8532b3c113ceb008675379f500a950610c46d6edafa7d9dca960398d8e45"
+}
diff --git a/src/hydra-queue-runner-v2/.sqlx/query-f99df144ca85c041624011f32dcfa0cce129a17edc95fad2444552bee8d6779d.json b/src/hydra-queue-runner-v2/.sqlx/query-f99df144ca85c041624011f32dcfa0cce129a17edc95fad2444552bee8d6779d.json
new file mode 100644
index 000000000..383766a7d
--- /dev/null
+++ b/src/hydra-queue-runner-v2/.sqlx/query-f99df144ca85c041624011f32dcfa0cce129a17edc95fad2444552bee8d6779d.json
@@ -0,0 +1,25 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n INSERT INTO buildsteps (\n build,\n stepnr,\n type,\n drvPath,\n busy,\n startTime,\n stopTime,\n system,\n status,\n propagatedFrom,\n errorMsg,\n machine\n ) VALUES (\n $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12\n )\n ON CONFLICT DO NOTHING\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int4",
+        "Int4",
+        "Text",
+        "Int4",
+        "Int4",
+        "Int4",
+        "Text",
+        "Int4",
+        "Int4",
+        "Text",
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "f99df144ca85c041624011f32dcfa0cce129a17edc95fad2444552bee8d6779d"
+}
diff --git a/src/hydra-queue-runner-v2/Cargo.lock b/src/hydra-queue-runner-v2/Cargo.lock
new file mode 100644
index 000000000..9448ca294
--- /dev/null
+++ b/src/hydra-queue-runner-v2/Cargo.lock
@@ -0,0 +1,4303 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "addr2line"
+version = "0.24.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
+dependencies = [
+ "gimli",
+]
+
+[[package]]
+name = "adler2"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
+
+[[package]]
+name = "ahash"
+version = "0.7.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
+dependencies = [
+ "getrandom 0.2.16",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "ahash"
+version = "0.8.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
+dependencies = [
+ "cfg-if",
+ "getrandom 0.3.3",
+ "once_cell",
+ "serde",
+ "version_check",
+ "zerocopy",
+]
+
+[[package]]
+name = "aho-corasick"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "allocator-api2"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
+
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+
+[[package]]
+name = "android_system_properties"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "anstream"
+version = "0.6.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192"
+dependencies = [
+ "anstyle",
+ "anstyle-parse",
+ "anstyle-query",
+ "anstyle-wincon",
+ "colorchoice",
+ "is_terminal_polyfill",
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle"
+version = "1.0.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
+
+[[package]]
+name = "anstyle-parse"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
+dependencies = [
+ "utf8parse",
+]
+
+[[package]]
+name = "anstyle-query"
+version = "1.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2"
+dependencies = [
+ "windows-sys 0.60.2",
+]
+
+[[package]]
+name = "anstyle-wincon"
+version = "3.0.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a"
+dependencies = [
+ "anstyle",
+ "once_cell_polyfill",
+ "windows-sys 0.60.2",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
+
+[[package]]
+name = "arc-swap"
+version = "1.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
+
+[[package]]
+name = "arrayvec"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
+
+[[package]]
+name = "async-stream"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
+dependencies = [
+ "async-stream-impl",
+ "futures-core",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "async-stream-impl"
+version = "0.3.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "async-trait"
+version = "0.1.89"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "atoi"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528"
+dependencies = [
+ "num-traits",
+]
+
+[[package]]
+name = "atomic-waker"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
+
+[[package]]
+name = "atomic_float"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "628d228f918ac3b82fe590352cc719d30664a0c13ca3a60266fe02c7132d480a"
+
+[[package]]
+name = "autocfg"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
+
+[[package]]
+name = "axum"
+version = "0.7.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f"
+dependencies = [
+ "async-trait",
+ "axum-core 0.4.5",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "http-body-util",
+ "itoa",
+ "matchit 0.7.3",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "sync_wrapper",
+ "tower 0.5.2",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5"
+dependencies = [
+ "axum-core 0.5.2",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "http-body-util",
+ "itoa",
+ "matchit 0.8.4",
+ "memchr",
+ "mime",
+ "percent-encoding",
+ "pin-project-lite",
+ "rustversion",
+ "serde",
+ "sync_wrapper",
+ "tower 0.5.2",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.4.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
+dependencies = [
+ "async-trait",
+ "bytes",
+ "futures-util",
+ "http",
+ "http-body",
+ "http-body-util",
+ "mime",
+ "pin-project-lite",
+ "rustversion",
+ "sync_wrapper",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "axum-core"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "http",
+ "http-body",
+ "http-body-util",
+ "mime",
+ "pin-project-lite",
+ "rustversion",
+ "sync_wrapper",
+ "tower-layer",
+ "tower-service",
+]
+
+[[package]]
+name = "backtrace"
+version = "0.3.75"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
+dependencies = [
+ "addr2line",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "rustc-demangle",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "base64"
+version = "0.21.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
+
+[[package]]
+name = "base64"
+version = "0.22.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
+
+[[package]]
+name = "base64ct"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba"
+
+[[package]]
+name = "bitflags"
+version = "2.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "bitvec"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
+dependencies = [
+ "funty",
+ "radium",
+ "tap",
+ "wyz",
+]
+
+[[package]]
+name = "block-buffer"
+version = "0.10.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
+dependencies = [
+ "generic-array",
+]
+
+[[package]]
+name = "borsh"
+version = "1.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ad8646f98db542e39fc66e68a20b2144f6a732636df7c2354e74645faaa433ce"
+dependencies = [
+ "borsh-derive",
+ "cfg_aliases",
+]
+
+[[package]]
+name = "borsh-derive"
+version = "1.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fdd1d3c0c2f5833f22386f252fe8ed005c7f59fdcddeef025c01b4c3b9fd9ac3"
+dependencies = [
+ "once_cell",
+ "proc-macro-crate",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "bstr"
+version = "1.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "234113d19d0d7d613b40e86fb654acf958910802bcceab913a4f9e7cda03b1a4"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "builder"
+version = "0.1.0"
+dependencies = [
+ "ahash 0.8.12",
+ "anyhow",
+ "async-stream",
+ "clap",
+ "futures",
+ "gethostname",
+ "hyper-util",
+ "log",
+ "nix",
+ "nix-utils",
+ "os_pipe",
+ "parking_lot",
+ "procfs-core",
+ "prost 0.14.1",
+ "sd-notify",
+ "shared",
+ "sysinfo",
+ "thiserror 2.0.16",
+ "tikv-jemallocator",
+ "tokio",
+ "tokio-stream",
+ "tonic 0.14.1",
+ "tonic-prost",
+ "tonic-prost-build",
+ "tower 0.5.2",
+ "tracing",
+ "tracing-log",
+ "tracing-subscriber",
+ "uuid",
+]
+
+[[package]]
+name = "bumpalo"
+version = "3.19.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
+
+[[package]]
+name = "byte-unit"
+version = "5.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1cd29c3c585209b0cbc7309bfe3ed7efd8c84c21b7af29c8bfae908f8777174"
+dependencies = [
+ "rust_decimal",
+ "serde",
+ "utf8-width",
+]
+
+[[package]]
+name = "bytecheck"
+version = "0.6.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2"
+dependencies = [
+ "bytecheck_derive",
+ "ptr_meta",
+ "simdutf8",
+]
+
+[[package]]
+name = "bytecheck_derive"
+version = "0.6.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "byteorder"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
+
+[[package]]
+name = "bytes"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
+
+[[package]]
+name = "cc"
+version = "1.2.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42bc4aea80032b7bf409b0bc7ccad88853858911b7713a8062fdc0623867bedc"
+dependencies = [
+ "jobserver",
+ "libc",
+ "shlex",
+]
+
+[[package]]
+name = "cfg-if"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
+
+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
+
+[[package]]
+name = "chrono"
+version = "0.4.41"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
+dependencies = [
+ "android-tzdata",
+ "iana-time-zone",
+ "num-traits",
+ "serde",
+ "windows-link",
+]
+
+[[package]]
+name = "clap"
+version = "4.5.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c5e4fcf9c21d2e544ca1ee9d8552de13019a42aa7dbf32747fa7aaf1df76e57"
+dependencies = [
+ "clap_builder",
+ "clap_derive",
+]
+
+[[package]]
+name = "clap_builder"
+version = "4.5.46"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fecb53a0e6fcfb055f686001bc2e2592fa527efaf38dbe81a6a9563562e57d41"
+dependencies = [
+ "anstream",
+ "anstyle",
+ "clap_lex",
+ "strsim",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.5.45"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14cb31bb0a7d536caef2639baa7fad459e15c3144efefa6dbd1c84562c4739f6"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.7.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
+
+[[package]]
+name = "codespan-reporting"
+version = "0.12.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fe6d2e5af09e8c8ad56c969f2157a3d4238cebc7c55f0a517728c38f7b200f81"
+dependencies = [
+ "serde",
+ "termcolor",
+ "unicode-width",
+]
+
+[[package]]
+name = "colorchoice"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
+
+[[package]]
+name = "concurrent-queue"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "console-api"
+version = "0.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8030735ecb0d128428b64cd379809817e620a40e5001c54465b99ec5feec2857"
+dependencies = [
+ "futures-core",
+ "prost 0.13.5",
+ "prost-types 0.13.5",
+ "tonic 0.12.3",
+ "tracing-core",
+]
+
+[[package]]
+name = "console-subscriber"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6539aa9c6a4cd31f4b1c040f860a1eac9aa80e7df6b05d506a6e7179936d6a01"
+dependencies = [
+ "console-api",
+ "crossbeam-channel",
+ "crossbeam-utils",
+ "futures-task",
+ "hdrhistogram",
+ "humantime",
+ "hyper-util",
+ "prost 0.13.5",
+ "prost-types 0.13.5",
+ "serde",
+ "serde_json",
+ "thread_local",
+ "tokio",
+ "tokio-stream",
+ "tonic 0.12.3",
+ "tracing",
+ "tracing-core",
+ "tracing-subscriber",
+]
+
+[[package]]
+name = "const-oid"
+version = "0.9.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
+
+[[package]]
+name = "core-foundation"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
+[[package]]
+name = "core-foundation-sys"
+version = "0.8.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
+
+[[package]]
+name = "cpufeatures"
+version = "0.2.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "crc"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675"
+dependencies = [
+ "crc-catalog",
+]
+
+[[package]]
+name = "crc-catalog"
+version = "2.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
+
+[[package]]
+name = "crc32fast"
+version = "1.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "crossbeam-channel"
+version = "0.5.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-queue"
+version = "0.3.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
+
+[[package]]
+name = "crypto-common"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+dependencies = [
+ "generic-array",
+ "typenum",
+]
+
+[[package]]
+name = "cxx"
+version = "1.0.173"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c64ed3da1c337cbaae7223cdcff8b4dddf698d188cd3eaddd1116f6b0295950"
+dependencies = [
+ "cc",
+ "cxxbridge-cmd",
+ "cxxbridge-flags",
+ "cxxbridge-macro",
+ "foldhash 0.2.0",
+ "link-cplusplus",
+]
+
+[[package]]
+name = "cxx-build"
+version = "1.0.173"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ae0a26a75a05551f5ae3d75b3557543d06682284eaa7419113162d602cb45766"
+dependencies = [
+ "cc",
+ "codespan-reporting",
+ "indexmap 2.11.0",
+ "proc-macro2",
+ "quote",
+ "scratch",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "cxxbridge-cmd"
+version = "1.0.173"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "952d408b6002b7db4b36da07c682a9cbb34f2db0efa03e976ae50a388414e16c"
+dependencies = [
+ "clap",
+ "codespan-reporting",
+ "indexmap 2.11.0",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "cxxbridge-flags"
+version = "1.0.173"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ccbd201b471c75c6abb6321cace706d1982d270e308b891c11a3262d320f5265"
+
+[[package]]
+name = "cxxbridge-macro"
+version = "1.0.173"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2bea8b915bbc4cb4288f242aa7ca18b23ecc6965e4d6e7c1b07905e3fe2e0c41"
+dependencies = [
+ "indexmap 2.11.0",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "db"
+version = "0.1.0"
+dependencies = [
+ "ahash 0.8.12",
+ "anyhow",
+ "chrono",
+ "futures",
+ "serde",
+ "serde_json",
+ "sqlx",
+ "tracing",
+]
+
+[[package]]
+name = "der"
+version = "0.7.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb"
+dependencies = [
+ "const-oid",
+ "pem-rfc7468",
+ "zeroize",
+]
+
+[[package]]
+name = "digest"
+version = "0.10.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
+dependencies = [
+ "block-buffer",
+ "const-oid",
+ "crypto-common",
+ "subtle",
+]
+
+[[package]]
+name = "displaydoc"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "dotenvy"
+version = "0.15.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
+
+[[package]]
+name = "either"
+version = "1.15.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "equivalent"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
+
+[[package]]
+name = "errno"
+version = "0.3.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
+dependencies = [
+ "libc",
+ "windows-sys 0.60.2",
+]
+
+[[package]]
+name = "etcetera"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943"
+dependencies = [
+ "cfg-if",
+ "home",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "event-listener"
+version = "5.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab"
+dependencies = [
+ "concurrent-queue",
+ "parking",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "fastrand"
+version = "2.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
+
+[[package]]
+name = "fixedbitset"
+version = "0.5.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99"
+
+[[package]]
+name = "flate2"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d"
+dependencies = [
+ "crc32fast",
+ "miniz_oxide",
+]
+
+[[package]]
+name = "flume"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+ "spin",
+]
+
+[[package]]
+name = "fnv"
+version = "1.0.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
+
+[[package]]
+name = "foldhash"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2"
+
+[[package]]
+name = "foldhash"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb"
+
+[[package]]
+name = "foreign-types"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1"
+dependencies = [
+ "foreign-types-shared",
+]
+
+[[package]]
+name = "foreign-types-shared"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
+
+[[package]]
+name = "form_urlencoded"
+version = "1.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf"
+dependencies = [
+ "percent-encoding",
+]
+
+[[package]]
+name = "funty"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
+
+[[package]]
+name = "futures"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-channel"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
+dependencies = [
+ "futures-core",
+ "futures-sink",
+]
+
+[[package]]
+name = "futures-core"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
+
+[[package]]
+name = "futures-executor"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
+dependencies = [
+ "futures-core",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
+name = "futures-intrusive"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f"
+dependencies = [
+ "futures-core",
+ "lock_api",
+ "parking_lot",
+]
+
+[[package]]
+name = "futures-io"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
+
+[[package]]
+name = "futures-macro"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.106",
+]
+
+[[package]]
+name = "futures-sink"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
+
+[[package]]
+name = "futures-task"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
+
+[[package]]
+name = "futures-util"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-io",
+ "futures-macro",
+ "futures-sink",
+ "futures-task",
+ "memchr",
+ "pin-project-lite",
+ "pin-utils",
+ "slab",
+]
+
+[[package]]
+name = "generic-array"
+version = "0.14.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+dependencies = [
+ "typenum",
+ "version_check",
+]
+
+[[package]]
+name = "gethostname"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc257fdb4038301ce4b9cd1b3b51704509692bb3ff716a410cbd07925d9dae55"
+dependencies = [
+ "rustix",
+ "windows-targets 0.52.6",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi 0.11.1+wasi-snapshot-preview1",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "r-efi",
+ "wasi 0.14.3+wasi-0.2.4",
+]
+
+[[package]]
+name = "gimli"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
+
+[[package]]
+name = "h2"
+version = "0.4.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "fnv",
+ "futures-core",
+ "futures-sink",
+ "http",
+ "indexmap 2.11.0",
+ "slab",
+ "tokio",
+ "tokio-util",
+ "tracing",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "ahash 0.7.8",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.15.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1"
+dependencies = [
+ "allocator-api2",
+ "equivalent",
+ "foldhash 0.1.5",
+]
+
+[[package]]
+name = "hashlink"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
+dependencies = [
+ "hashbrown 0.15.5",
+]
+
+[[package]]
+name = "hdrhistogram"
+version = "7.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d"
+dependencies = [
+ "base64 0.21.7",
+ "byteorder",
+ "flate2",
+ "nom",
+ "num-traits",
+]
+
+[[package]]
+name = "heck"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
+
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
+[[package]]
+name = "hkdf"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7"
+dependencies = [
+ "hmac",
+]
+
+[[package]]
+name = "hmac"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
+dependencies = [
+ "digest",
+]
+
+[[package]]
+name = "home"
+version = "0.5.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf"
+dependencies = [
+ "windows-sys 0.59.0",
+]
+
+[[package]]
+name = "http"
+version = "1.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
+dependencies = [
+ "bytes",
+ "fnv",
+ "itoa",
+]
+
+[[package]]
+name = "http-body"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
+dependencies = [
+ "bytes",
+ "http",
+]
+
+[[package]]
+name = "http-body-util"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
+dependencies = [
+ "bytes",
+ "futures-core",
+ "http",
+ "http-body",
+ "pin-project-lite",
+]
+
+[[package]]
+name = "httparse"
+version = "1.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87"
+
+[[package]]
+name = "httpdate"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
+
+[[package]]
+name = "humantime"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f"
+
+[[package]]
+name = "hyper"
+version = "1.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e"
+dependencies = [
+ "atomic-waker",
+ "bytes",
+ "futures-channel",
+ "futures-core",
+ "h2",
+ "http",
+ "http-body",
+ "httparse",
+ "httpdate",
+ "itoa",
+ "pin-project-lite",
+ "pin-utils",
+ "smallvec",
+ "tokio",
+ "want",
+]
+
+[[package]]
+name = "hyper-timeout"
+version = "0.5.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
+dependencies = [
+ "hyper",
+ "hyper-util",
+ "pin-project-lite",
+ "tokio",
+ "tower-service",
+]
+
+[[package]]
+name = "hyper-util"
+version = "0.1.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
+dependencies = [
+ "bytes",
+ "futures-channel",
+ "futures-core",
+ "futures-util",
+ "http",
+ "http-body",
+ "hyper",
+ "libc",
+ "pin-project-lite",
+ "socket2 0.6.0",
+ "tokio",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "iana-time-zone"
+version = "0.1.63"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
+dependencies = [
+ "android_system_properties",
+ "core-foundation-sys",
+ "iana-time-zone-haiku",
+ "js-sys",
+ "log",
+ "wasm-bindgen",
+ "windows-core",
+]
+
+[[package]]
+name = "iana-time-zone-haiku"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "icu_collections"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47"
+dependencies = [
+ "displaydoc",
+ "potential_utf",
+ "yoke",
+ "zerofrom",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_locale_core"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a"
+dependencies = [
+ "displaydoc",
+ "litemap",
+ "tinystr",
+ "writeable",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_normalizer_data",
+ "icu_properties",
+ "icu_provider",
+ "smallvec",
+ "zerovec",
+]
+
+[[package]]
+name = "icu_normalizer_data"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3"
+
+[[package]]
+name = "icu_properties"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b"
+dependencies = [
+ "displaydoc",
+ "icu_collections",
+ "icu_locale_core",
+ "icu_properties_data",
+
"icu_provider", + "potential_utf", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" + +[[package]] +name = "icu_provider" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" +dependencies = [ + "displaydoc", + "icu_locale_core", + "stable_deref_trait", + "tinystr", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +dependencies = [ + "equivalent", + "hashbrown 0.15.5", +] + +[[package]] +name = "io-uring" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" +dependencies = [ + "bitflags", + "cfg-if", + "libc", +] + +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.3", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.77" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +dependencies = [ + "spin", +] + +[[package]] +name = "libc" +version = "0.2.175" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" + +[[package]] +name = "libm" +version = "0.2.15" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3" +dependencies = [ + "bitflags", + "libc", + "redox_syscall", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" +dependencies = [ + "pkg-config", + "vcpkg", +] + +[[package]] +name = "link-cplusplus" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a6f6da007f968f9def0d65a05b187e2960183de70c160204ecfccf0ee330212" +dependencies = [ + "cc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + +[[package]] +name = "listenfd" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b87bc54a4629b4294d0b3ef041b64c40c611097a677d9dc07b2c67739fe39dba" +dependencies = [ + "libc", + "uuid", + "winapi", +] + +[[package]] +name = "litemap" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" + +[[package]] +name = "lock_api" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "lockfile" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be1cf190319c74ba3e45923624626ae2e43fe42ad7e60ff38ded81044c37630" + +[[package]] +name = "log" +version = "0.4.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" + +[[package]] +name = "matchers" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" +dependencies = [ + "regex-automata", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "matchit" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + +[[package]] +name = "md-5" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" +dependencies = [ + "cfg-if", + "digest", +] + +[[package]] +name = "memchr" +version = "2.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", +] + +[[package]] +name = "mio" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" +dependencies = [ + "libc", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", +] + +[[package]] +name = "multimap" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" + +[[package]] +name = "native-tls" +version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +dependencies = [ + "libc", + "log", + "openssl", + "openssl-probe", + "openssl-sys", + "schannel", + "security-framework", + "security-framework-sys", + "tempfile", +] + +[[package]] +name = "nix" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74523f3a35e05aba87a1d978330aef40f67b0304ac79c1c00b294c9830543db6" +dependencies = [ + "bitflags", + "cfg-if", + "cfg_aliases", + "libc", +] + +[[package]] +name = "nix-diff" +version = "0.1.0" +source = "git+https://github.com/mic92/nix-diff-rs.git?rev=6c0902f9c6f756b09095e9d77b424332ff0e32e9#6c0902f9c6f756b09095e9d77b424332ff0e32e9" +dependencies = [ + "anyhow", + "memchr", + "similar", + "tempfile", + "tikv-jemallocator", + "tinyjson", +] + +[[package]] +name = "nix-utils" +version = "0.1.0" +dependencies = [ + "ahash 0.8.12", + "anyhow", + "bytes", + "cxx", + "cxx-build", + "futures", + "log", + "nix-diff", + "pkg-config", + "regex", + "serde", + "sha2", + "thiserror 2.0.16", + "tokio", + "tokio-stream", + "tokio-util", + "tracing", + "url", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "ntapi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" +dependencies = [ + "winapi", +] + +[[package]] +name = "nu-ansi-term" +version = "0.50.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "num-bigint-dig" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151" +dependencies = [ + "byteorder", + "lazy_static", + "libm", + "num-integer", + "num-iter", + "num-traits", + "rand", + "smallvec", + "zeroize", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-iter" +version = "0.1.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "objc2-core-foundation" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166" +dependencies = [ + "bitflags", +] + +[[package]] +name = "objc2-io-kit" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71c1c64d6120e51cd86033f67176b1cb66780c2efe34dec55176f77befd93c0a" +dependencies = [ + "libc", + "objc2-core-foundation", +] + +[[package]] +name = "object" +version = "0.36.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + +[[package]] +name = "openssl" +version = "0.10.73" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" +dependencies = [ + "bitflags", + "cfg-if", + "foreign-types", + "libc", + "once_cell", + "openssl-macros", + "openssl-sys", +] + +[[package]] +name = "openssl-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "openssl-probe" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" + +[[package]] +name = "openssl-sys" +version = "0.9.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "os_pipe" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db335f4760b14ead6290116f2427bf33a14d4f0617d49f78a246de10c1831224" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "parking" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" + +[[package]] +name = "parking_lot" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" +dependencies = [ + "cfg-if", + "libc", + 
"redox_syscall", + "smallvec", + "windows-targets 0.52.6", +] + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "petgraph" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" +dependencies = [ + "fixedbitset", + "indexmap 2.11.0", +] + +[[package]] +name = "pin-project" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "pkcs1" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" +dependencies = [ + "der", + "pkcs8", + "spki", +] + +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "potential_utf" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +dependencies = [ + "zerovec", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.106", +] + +[[package]] +name = "proc-macro-crate" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" +dependencies = [ + "toml_edit", +] + +[[package]] +name = "proc-macro2" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" 
+dependencies = [ + "unicode-ident", +] + +[[package]] +name = "procfs" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25485360a54d6861439d60facef26de713b1e126bf015ec8f98239467a2b82f7" +dependencies = [ + "bitflags", + "chrono", + "flate2", + "procfs-core", + "rustix", +] + +[[package]] +name = "procfs-core" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6401bf7b6af22f78b563665d15a22e9aef27775b79b149a66ca022468a4e405" +dependencies = [ + "bitflags", + "chrono", + "hex", +] + +[[package]] +name = "prometheus" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a" +dependencies = [ + "cfg-if", + "fnv", + "lazy_static", + "memchr", + "parking_lot", + "protobuf", + "thiserror 2.0.16", +] + +[[package]] +name = "prost" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" +dependencies = [ + "bytes", + "prost-derive 0.13.5", +] + +[[package]] +name = "prost" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +dependencies = [ + "bytes", + "prost-derive 0.14.1", +] + +[[package]] +name = "prost-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +dependencies = [ + "heck", + "itertools", + "log", + "multimap", + "once_cell", + "petgraph", + "prettyplease", + "prost 0.14.1", + "prost-types 0.14.1", + "pulldown-cmark", + "pulldown-cmark-to-cmark", + "regex", + "syn 2.0.106", + "tempfile", +] + +[[package]] +name = "prost-derive" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "prost-derive" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "prost-types" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" +dependencies = [ + "prost 0.13.5", +] + +[[package]] +name = "prost-types" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +dependencies = [ + "prost 0.14.1", +] + +[[package]] +name = "protobuf" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" +dependencies = [ + "once_cell", + "protobuf-support", + "thiserror 1.0.69", +] + +[[package]] +name = "protobuf-support" +version = "3.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" +dependencies = [ + "thiserror 1.0.69", +] + +[[package]] +name = "ptr_meta" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" +dependencies = [ + "ptr_meta_derive", +] + +[[package]] +name = "ptr_meta_derive" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "pulldown-cmark" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +dependencies = [ + "bitflags", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark-to-cmark" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75" +dependencies = [ + "pulldown-cmark", +] + +[[package]] +name = "queue-runner" +version = "0.1.0" +dependencies = [ + "ahash 0.8.12", + "anyhow", + "arc-swap", + "async-stream", + "atomic_float", + "byte-unit", + "bytes", + "chrono", + "clap", + "console-subscriber", + "db", + "futures", + "futures-util", + "h2", + "http-body-util", + "hyper", + "hyper-util", + "listenfd", + "lockfile", + "log", + "nix-utils", + "parking_lot", + "procfs", + "prometheus", + "prost 0.14.1", + "sd-notify", + "secrecy", + "serde", + "serde_json", + "shared", + "thiserror 2.0.16", + "tikv-jemallocator", + "tokio", + "tokio-stream", + "toml", + "tonic 0.14.1", + "tonic-prost", + "tonic-prost-build", + "tonic-reflection", + "tracing", + "tracing-log", + "tracing-subscriber", + "uuid", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "radium" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom 0.2.16", +] + +[[package]] +name = "redox_syscall" +version = "0.5.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +dependencies = [ + "aho-corasick", + 
"memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" + +[[package]] +name = "rend" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" +dependencies = [ + "bytecheck", +] + +[[package]] +name = "ring" +version = "0.17.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" +dependencies = [ + "cc", + "cfg-if", + "getrandom 0.2.16", + "libc", + "untrusted", + "windows-sys 0.52.0", +] + +[[package]] +name = "rkyv" +version = "0.7.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" +dependencies = [ + "bitvec", + "bytecheck", + "bytes", + "hashbrown 0.12.3", + "ptr_meta", + "rend", + "rkyv_derive", + "seahash", + "tinyvec", + "uuid", +] + +[[package]] +name = "rkyv_derive" +version = "0.7.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "rsa" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b" +dependencies = [ + "const-oid", + "digest", + "num-bigint-dig", + "num-integer", + "num-traits", + "pkcs1", + "pkcs8", + "rand_core", + "signature", + "spki", + "subtle", + "zeroize", +] + +[[package]] +name = "rust_decimal" +version = "1.37.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b203a6425500a03e0919c42d3c47caca51e79f1132046626d2c8871c5092035d" +dependencies = [ + "arrayvec", + "borsh", + "bytes", + "num-traits", + "rand", + "rkyv", + "serde", + "serde_json", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" + +[[package]] +name = "rustix" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys 0.60.2", +] + +[[package]] +name = "rustls" +version = "0.23.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc" +dependencies = [ + "log", + "once_cell", + "ring", + "rustls-pki-types", + "rustls-webpki", + "subtle", + "zeroize", +] + +[[package]] +name = "rustls-pki-types" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79" +dependencies = [ + "zeroize", +] + +[[package]] +name = "rustls-webpki" +version = "0.103.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc" +dependencies = [ + "ring", + "rustls-pki-types", + "untrusted", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "scratch" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d68f2ec51b097e4c1a75b681a8bec621909b5e91f15bb7b840c4f2f7b01148b2" + +[[package]] +name = "sd-notify" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" +dependencies = [ + "libc", +] + +[[package]] +name = "seahash" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" + +[[package]] +name = "secrecy" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e891af845473308773346dc847b2c23ee78fe442e0472ac50e22a18a93d3ae5a" +dependencies = [ + "serde", + "zeroize", +] + +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "serde" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.219" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "serde_json" +version = "1.0.143" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40734c41988f7306bb04f0ecf60ec0f3f1caa34290e4e8ea471dcd3346483b83" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_urlencoded" 
+version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shared" +version = "0.1.0" +dependencies = [ + "anyhow", + "nix-utils", + "regex", + "sha2", + "tokio", + "tracing", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +dependencies = [ + "libc", +] + +[[package]] +name = "signature" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" +dependencies = [ + "digest", + "rand_core", +] + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "similar" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbbb5d9659141646ae647b42fe094daf6c6192d1620870b449d9557f748b2daa" +dependencies = [ + "bstr", +] + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" +dependencies = [ + "serde", +] + +[[package]] +name = "socket2" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + +[[package]] +name = "spin" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" +dependencies = [ + "lock_api", +] + +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + +[[package]] +name = "sqlx" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fefb893899429669dcdd979aff487bd78f4064e5e7907e4269081e0ef7d97dc" +dependencies = [ + "sqlx-core", + "sqlx-macros", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", +] + +[[package]] +name = "sqlx-core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "crc", + "crossbeam-queue", + "either", + "event-listener", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashbrown 0.15.5", + "hashlink", + "indexmap 2.11.0", + "log", + "memchr", + "native-tls", + "once_cell", + "percent-encoding", + "serde", + "serde_json", + "sha2", + "smallvec", + "thiserror 2.0.16", + "tokio", + "tokio-stream", + "tracing", + "url", +] + +[[package]] +name = "sqlx-macros" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2d452988ccaacfbf5e0bdbc348fb91d7c8af5bee192173ac3636b5fb6e6715d" +dependencies = [ + "proc-macro2", + "quote", + "sqlx-core", + "sqlx-macros-core", + "syn 2.0.106", +] + +[[package]] +name = "sqlx-macros-core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19a9c1841124ac5a61741f96e1d9e2ec77424bf323962dd894bdb93f37d5219b" +dependencies = [ + "dotenvy", + "either", + "heck", + "hex", + "once_cell", + "proc-macro2", + "quote", + "serde", + "serde_json", + "sha2", + "sqlx-core", + "sqlx-mysql", + "sqlx-postgres", + "sqlx-sqlite", + "syn 2.0.106", + "tokio", + "url", +] + +[[package]] +name = "sqlx-mysql" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags", + "byteorder", + "bytes", + "chrono", + "crc", + "digest", + "dotenvy", + "either", + "futures-channel", + "futures-core", + "futures-io", + "futures-util", + "generic-array", + "hex", + "hkdf", + "hmac", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "percent-encoding", + "rand", + "rsa", + "serde", + "sha1", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror 2.0.16", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-postgres" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" +dependencies = [ + "atoi", + "base64 0.22.1", + "bitflags", + "byteorder", + "chrono", + "crc", + "dotenvy", + "etcetera", + "futures-channel", + "futures-core", + "futures-util", + "hex", + "hkdf", + "hmac", + "home", + "itoa", + "log", + "md-5", + "memchr", + "once_cell", + "rand", + "serde", + "serde_json", + "sha2", + "smallvec", + "sqlx-core", + "stringprep", + "thiserror 2.0.16", + "tracing", + "whoami", +] + +[[package]] +name = "sqlx-sqlite" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea" +dependencies = [ + "atoi", + "chrono", + "flume", + "futures-channel", + "futures-core", + "futures-executor", + "futures-intrusive", + "futures-util", + "libsqlite3-sys", + "log", + "percent-encoding", + "serde", + 
"serde_urlencoded", + "sqlx-core", + "thiserror 2.0.16", + "tracing", + "url", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" + +[[package]] +name = "stringprep" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" +dependencies = [ + "unicode-bidi", + "unicode-normalization", + "unicode-properties", +] + +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "sysinfo" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "252800745060e7b9ffb7b2badbd8b31cfa4aa2e61af879d0a3bf2a317c20217d" +dependencies = [ + "libc", + "memchr", + "ntapi", + "objc2-core-foundation", + "objc2-io-kit", + "windows", +] + +[[package]] +name = "tap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" + +[[package]] +name = "tempfile" +version = "3.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e" +dependencies = [ + "fastrand", + "getrandom 0.3.3", + "once_cell", + "rustix", + "windows-sys 0.60.2", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" +dependencies = [ + "thiserror-impl 2.0.16", +] + +[[package]] +name = "thiserror-impl" 
+version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "tikv-jemalloc-sys" +version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "tikv-jemallocator" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" +dependencies = [ + "libc", + "tikv-jemalloc-sys", +] + +[[package]] +name = "tinyjson" +version = "2.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ab95735ea2c8fd51154d01e39cf13912a78071c2d89abc49a7ef102a7dd725a" + +[[package]] +name = "tinystr" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tinyvec" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + +[[package]] +name = "tokio" +version = "1.47.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +dependencies = [ + "backtrace", + "bytes", + "io-uring", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "slab", + "socket2 0.6.0", + "tokio-macros", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "tokio-macros" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-stream" +version = "0.1.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "toml" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75129e1dc5000bfbaa9fee9d1b21f974f9fbad9daec557a521ee6e080825f6e8" +dependencies = [ + "indexmap 2.11.0", + "serde", + "serde_spanned", + "toml_datetime 0.7.0", + "toml_parser", + "toml_writer", + "winnow", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" + +[[package]] +name = "toml_datetime" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap 2.11.0", + "toml_datetime 0.6.11", + "winnow", +] + +[[package]] +name = "toml_parser" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b551886f449aa90d4fe2bdaa9f4a2577ad2dde302c61ecf262d80b116db95c10" +dependencies = [ + "winnow", +] + +[[package]] +name = "toml_writer" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc842091f2def52017664b53082ecbbeb5c7731092bad69d2c63050401dfd64" + +[[package]] +name = "tonic" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" +dependencies = [ + "async-stream", + "async-trait", + "axum 0.7.9", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "prost 0.13.5", + "socket2 0.5.10", + "tokio", + "tokio-stream", + "tower 0.4.13", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tonic" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67ac5a8627ada0968acec063a4746bf79588aa03ccb66db2f75d7dce26722a40" +dependencies = [ + "async-trait", + "axum 0.8.4", + "base64 0.22.1", + "bytes", + "h2", + "http", + "http-body", + "http-body-util", + "hyper", + "hyper-timeout", + "hyper-util", + "percent-encoding", + "pin-project", + "socket2 0.6.0", + "sync_wrapper", + "tokio", + "tokio-rustls", + "tokio-stream", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", + "zstd", +] + +[[package]] +name = "tonic-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49e323d8bba3be30833707e36d046deabf10a35ae8ad3cae576943ea8933e25d" +dependencies = [ + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "tonic-prost" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9c511b9a96d40cb12b7d5d00464446acf3b9105fd3ce25437cfe41c92b1c87d" +dependencies = [ + "bytes", + "prost 0.14.1", + "tonic 0.14.1", +] + +[[package]] +name = "tonic-prost-build" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8ef298fcd01b15e135440c4b8c974460ceca4e6a5af7f1c933b08e4d2875efa1" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types 0.14.1", + "quote", + "syn 2.0.106", + "tempfile", + "tonic-build", +] + +[[package]] +name = "tonic-reflection" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0267a0073385cd94996197d12acb1856a3a0a2367482c726a48a769f6fed8a3a" +dependencies = [ + "prost 0.14.1", + "prost-types 0.14.1", + "tokio", + "tokio-stream", + "tonic 0.14.1", + "tonic-prost", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 2.11.0", + "pin-project-lite", + "slab", + "sync_wrapper", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.41" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "tracing-core" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "typenum" +version = "1.18.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" + +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + +[[package]] +name = "unicode-bidi" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "unicode-normalization" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +dependencies = [ + "tinyvec", +] + +[[package]] +name = "unicode-properties" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" + +[[package]] +name = "unicode-width" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8-width" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86bd8d4e895da8537e5315b8254664e6b769c4ff3db18321b297a1e7004392e3" + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f33196643e165781c20a5ead5582283a7dacbb87855d867fbc2df3f81eddc1be" +dependencies = [ + "getrandom 0.3.3", + "js-sys", + "serde", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies 
= [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasi" +version = "0.14.3+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + +[[package]] +name = "wasm-bindgen" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +dependencies = [ + "bumpalo", + "log", + "proc-macro2", + "quote", + "syn 2.0.106", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.100" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "whoami" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d" +dependencies = [ + "libredox", + "wasite", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" +dependencies = [ + "windows-sys 0.60.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows" +version = "0.61.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections", + "windows-core", + "windows-future", + "windows-link", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" +dependencies = [ + "windows-core", +] + +[[package]] +name = "windows-core" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core", + "windows-link", + "windows-threading", +] + +[[package]] +name = "windows-implement" +version = "0.60.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "windows-interface" +version = "0.59.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "windows-link" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-numerics" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" +dependencies = [ + "windows-core", + "windows-link", +] + +[[package]] +name = "windows-result" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.3", 
+] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] + +[[package]] +name = "wit-bindgen" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "052283831dbae3d879dc7f51f3d92703a316ca49f91540417d38591826127814" + +[[package]] +name = "writeable" +version = "0.6.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" + +[[package]] +name = "wyz" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" +dependencies = [ + "tap", +] + +[[package]] +name = "yoke" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" +dependencies = [ + "serde", + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.15+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/src/hydra-queue-runner-v2/Cargo.toml b/src/hydra-queue-runner-v2/Cargo.toml new file mode 100644 index 000000000..76642fa4c --- /dev/null +++ b/src/hydra-queue-runner-v2/Cargo.toml @@ -0,0 +1,9 @@ +[workspace] + +resolver = "2" +members = ["builder", "queue-runner", "crates/*"] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 diff --git a/src/hydra-queue-runner-v2/builder/Cargo.toml b/src/hydra-queue-runner-v2/builder/Cargo.toml new file mode 100644 index 000000000..eb9317b09 --- /dev/null +++ b/src/hydra-queue-runner-v2/builder/Cargo.toml @@ -0,0 +1,50 @@ +[package] +name = "builder" +version = "0.1.0" +edition = "2024" +license = "GPL-3.0" + +[dependencies] +log = "0.4" +tracing = "0.1" +tracing-subscriber = { version = "0.3.18", features = [ + "registry", + "env-filter", +] } +tracing-log = "0.2.0" + +sd-notify = "0.4.5" + +anyhow = "1.0.98" +thiserror = "2.0" +clap = { version = "4", features = ["derive"] } +uuid = { version = "1.16", features = ["v4"] } +ahash = "0.8.11" +parking_lot = "0.12.4" + +tokio = { version = "1.34", features = ["full"] } +tokio-stream = "0.1" +futures = "0.3" +prost = "0.14" +tonic = { version = "0.14", features = ["zstd", "tls-ring"] } +tonic-prost = "0.14" +tower = "0.5.2" +hyper-util = "0.1.10" +async-stream = "0.3" + +gethostname = "1" +procfs-core = "0.18" +nix = { version = "0.30.0", default-features = false, features = ["fs"] } +os_pipe = "1.2" + +nix-utils = { path = "../crates/nix-utils" } +shared = { path = "../crates/shared" } + +[target.'cfg(target_os = "macos")'.dependencies] +sysinfo = "0.36.0" + +[target.'cfg(not(target_env = "msvc"))'.dependencies] +tikv-jemallocator = "0.6" + +[build-dependencies] +tonic-prost-build = "0.14" diff --git a/src/hydra-queue-runner-v2/builder/build.rs b/src/hydra-queue-runner-v2/builder/build.rs new file mode 100644 index 000000000..fcae72702 --- /dev/null +++ b/src/hydra-queue-runner-v2/builder/build.rs @@ -0,0 +1,10 @@ +use std::{env, path::PathBuf}; + +fn main() -> Result<(), Box> { + let out_dir = PathBuf::from(env::var("OUT_DIR")?); + tonic_prost_build::configure() + .build_server(false) + .file_descriptor_set_path(out_dir.join("streaming_descriptor.bin")) + .compile_protos(&["../proto/v1/streaming.proto"], &["../proto"])?; + Ok(()) +} diff --git a/src/hydra-queue-runner-v2/builder/src/config.rs b/src/hydra-queue-runner-v2/builder/src/config.rs new file mode 100644 index 000000000..36b4f55f2 --- /dev/null +++ b/src/hydra-queue-runner-v2/builder/src/config.rs @@ -0,0 +1,137 @@ +use clap::Parser; + +#[derive(Parser, Debug)] +#[clap( + author, + version, + about, + long_about = None, +)] +pub struct Args { + /// Gateway endpoint + #[clap(short, long, default_value = "http://[::1]:50051")] + pub gateway_endpoint: String, + + /// Ping interval in seconds + #[clap(short, long, default_value_t = 10)] + pub ping_interval: u64, + + /// Speed factor that is used when joining the queue-runner + #[clap(short, long, default_value_t = 1.0)] + pub speed_factor: f32, + + /// Maximum number of allowed jobs + #[clap(long, default_value_t = 4)] + pub max_jobs: u32, + + /// /tmp avail percentage Threshold + #[clap(long, default_value_t = 10.)] + pub tmp_avail_threshold: f32, + + /// prefix/store avail percentage Threshold + #[clap(long, default_value_t = 10.)] + pub store_avail_threshold: f32, + + /// Load1 Threshold + #[clap(long, default_value_t = 8.)] + pub load1_threshold: 
f32, + + /// CPU Pressure Threshold + #[clap(long, default_value_t = 75.)] + pub cpu_psi_threshold: f32, + + /// Memory Pressure Threshold + #[clap(long, default_value_t = 80.)] + pub mem_psi_threshold: f32, + + /// IO Pressure Threshold, null disables this pressure check + #[clap(long)] + pub io_psi_threshold: Option<f32>, + + /// Path to Server root ca cert + #[clap(long)] + pub server_root_ca_cert_path: Option<String>, + + /// Path to Client cert + #[clap(long)] + pub client_cert_path: Option<String>, + + /// Path to Client key + #[clap(long)] + pub client_key_path: Option<String>, + + /// Domain name for mtls + #[clap(long)] + pub domain_name: Option<String>, + + /// List of supported systems, defaults to systems from nix and extra-platforms + #[clap(long, default_value = None)] + pub systems: Option<Vec<String>>, + + /// List of supported features, defaults to configured system features + #[clap(long, default_value = None)] + pub supported_features: Option<Vec<String>>, + + /// List of mandatory features + #[clap(long, default_value = None)] + pub mandatory_features: Option<Vec<String>>, + + /// Use substitution over pulling inputs via queue runner + #[clap(long, default_value_t = false)] + pub use_substitutes: bool, +} + +impl Args { + pub fn new() -> Self { + Self::parse() + } + + pub fn mtls_enabled(&self) -> bool { + self.server_root_ca_cert_path.is_some() + && self.client_cert_path.is_some() + && self.client_key_path.is_some() + && self.domain_name.is_some() + } + + pub fn mtls_configured_correctly(&self) -> bool { + self.mtls_enabled() + || (self.server_root_ca_cert_path.is_none() + && self.client_cert_path.is_none() + && self.client_key_path.is_none() + && self.domain_name.is_none()) + } + + pub async fn get_mtls( + &self, + ) -> anyhow::Result<( + tonic::transport::Certificate, + tonic::transport::Identity, + String, + )> { + let server_root_ca_cert_path = self + .server_root_ca_cert_path + .as_deref() + .ok_or(anyhow::anyhow!("server_root_ca_cert_path not provided"))?; + let client_cert_path = self + .client_cert_path + .as_deref() + .ok_or(anyhow::anyhow!("client_cert_path not provided"))?; + let client_key_path = self + .client_key_path + .as_deref() + .ok_or(anyhow::anyhow!("client_key_path not provided"))?; + let domain_name = self + .domain_name + .as_deref() + .ok_or(anyhow::anyhow!("domain_name not provided"))?; + + let server_root_ca_cert = tokio::fs::read_to_string(server_root_ca_cert_path).await?; + let server_root_ca_cert = tonic::transport::Certificate::from_pem(server_root_ca_cert); + + let client_cert = tokio::fs::read_to_string(client_cert_path).await?; + let client_key = tokio::fs::read_to_string(client_key_path).await?; + let client_identity = tonic::transport::Identity::from_pem(client_cert, client_key); + + Ok((server_root_ca_cert, client_identity, domain_name.to_owned())) + } +} diff --git a/src/hydra-queue-runner-v2/builder/src/main.rs b/src/hydra-queue-runner-v2/builder/src/main.rs new file mode 100644 index 000000000..1e2d432c3 --- /dev/null +++ b/src/hydra-queue-runner-v2/builder/src/main.rs @@ -0,0 +1,224 @@ +#![deny(clippy::all)] +#![deny(clippy::pedantic)] +#![allow(clippy::match_wildcard_for_single_variants)] + +use std::sync::Arc; + +use anyhow::Context; +use runner_v1::{ + BuilderRequest, builder_request, runner_request, runner_service_client::RunnerServiceClient, +}; +use state::State; +use tonic::transport::Channel; + +use tokio_stream::StreamExt as _; +use tonic::Request; +use tracing_subscriber::{Layer as _, Registry, layer::SubscriberExt as _}; + +mod config; +mod state; +mod system; +mod runner_v1 { + // We need
to allow pedantic here because of generated code + #![allow(clippy::pedantic)] + + tonic::include_proto!("runner.v1"); +} + +#[cfg(not(target_env = "msvc"))] +#[global_allocator] +static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; + +#[tracing::instrument(skip(state, client, request), err)] +async fn handle_request( + state: Arc<State>, + client: &mut RunnerServiceClient<Channel>, + request: runner_request::Message, +) -> anyhow::Result<()> { + match request { + runner_request::Message::Build(m) => { + let client = client.clone(); + state.schedule_build(client, m); + } + runner_request::Message::Abort(m) => { + state.abort_build(&m); + } + runner_request::Message::Join(m) => { + state.max_concurrent_downloads.store( + m.max_concurrent_downloads, + std::sync::atomic::Ordering::Relaxed, + ); + } + runner_request::Message::Ping(_) => (), + } + Ok(()) +} + +#[tracing::instrument(skip(client, state), err)] +async fn start_bidirectional_stream( + client: &mut RunnerServiceClient<Channel>, + state: Arc<State>, +) -> anyhow::Result<()> { + let state2 = state.clone(); + let join_msg = state.get_join_message().await?; + + let ping_stream = async_stream::stream! { + yield BuilderRequest { + message: Some(builder_request::Message::Join(join_msg)) + }; + + let mut interval = tokio::time::interval(std::time::Duration::from_secs(state.config.ping_interval)); + loop { + interval.tick().await; + + let ping = match state.get_ping_message() { + Ok(v) => builder_request::Message::Ping(v), + Err(e) => { + log::error!("Failed to construct ping message: {e}"); + continue + }, + }; + log::debug!("sending ping: {ping:?}"); + + yield BuilderRequest { + message: Some(ping) + }; + } + }; + let mut stream = client + .open_tunnel(Request::new(ping_stream)) + .await? + .into_inner(); + + let mut consecutive_failure_count = 0; + while let Some(item) = stream.next().await { + match item.map(|v| v.message) { + Ok(Some(v)) => { + consecutive_failure_count = 0; + if let Err(err) = handle_request(state2.clone(), client, v).await { + log::error!("Failed to correctly handle request: {err}"); + } + } + Ok(None) => { + consecutive_failure_count = 0; + } + Err(e) => { + consecutive_failure_count += 1; + log::error!("stream message delivery failed: {e}"); + if consecutive_failure_count == 10 { + return Err(anyhow::anyhow!( + "Failed to communicate {consecutive_failure_count} times over the channel. \ Terminating the application." + )); + } + } + } + } + Ok(()) +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + tracing_log::LogTracer::init()?; + let log_env_filter = tracing_subscriber::EnvFilter::try_from_default_env() + .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")); + let fmt_layer = tracing_subscriber::fmt::layer() + .compact() + .with_filter(log_env_filter); + let subscriber = Registry::default().with(fmt_layer); + tracing::subscriber::set_global_default(subscriber)?; + + let args = config::Args::new(); + + if !args.mtls_configured_correctly() { + log::error!( + "mtls configured improperly, please pass all options: \ server_root_ca_cert_path, client_cert_path, client_key_path and domain_name!"
+ ); + return Err(anyhow::anyhow!("Configuration issue")); + } + + log::info!("connecting to {}", args.gateway_endpoint); + let channel = if args.mtls_enabled() { + log::info!("mtls is enabled"); + let (server_root_ca_cert, client_identity, domain_name) = args + .get_mtls() + .await + .context("Failed to get_mtls Certificate and Identity")?; + let tls = tonic::transport::ClientTlsConfig::new() + .domain_name(domain_name) + .ca_certificate(server_root_ca_cert) + .identity(client_identity); + + tonic::transport::Channel::builder(args.gateway_endpoint.parse()?) + .tls_config(tls) + .context("Failed to attach tls config")? + .connect() + .await + .context("Failed to establish connection with Channel")? + } else if let Some(path) = args.gateway_endpoint.strip_prefix("unix://") { + let path = path.to_owned(); + tonic::transport::Endpoint::try_from("http://[::]:50051")? + .connect_with_connector(tower::service_fn(move |_: tonic::transport::Uri| { + let path = path.clone(); + async move { + Ok::<_, std::io::Error>(hyper_util::rt::TokioIo::new( + tokio::net::UnixStream::connect(&path).await?, + )) + } + })) + .await + .context("Failed to establish unix socket connection with Channel")? + } else { + tonic::transport::Channel::builder(args.gateway_endpoint.parse()?) + .connect() + .await + .context("Failed to establish connection with Channel")? + }; + + let state = State::new(args)?; + let mut client = RunnerServiceClient::new(channel) + .send_compressed(tonic::codec::CompressionEncoding::Zstd) + .accept_compressed(tonic::codec::CompressionEncoding::Zstd) + .max_decoding_message_size(50 * 1024 * 1024) + .max_encoding_message_size(50 * 1024 * 1024); + let task = tokio::spawn({ + let state = state.clone(); + async move { start_bidirectional_stream(&mut client, state.clone()).await } + }); + + let _notify = sd_notify::notify( + false, + &[ + sd_notify::NotifyState::Status("Running"), + sd_notify::NotifyState::Ready, + ], + ); + + let mut sigint = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::interrupt())?; + let mut sigterm = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?; + + let abort_handle = task.abort_handle(); + + tokio::select! 
{ + _ = sigint.recv() => { + log::info!("Received sigint - shutting down gracefully"); + let _ = sd_notify::notify(false, &[sd_notify::NotifyState::Stopping]); + abort_handle.abort(); + state.abort_all_active_builds(); + let _ = state.clear_gcroots(); + } + _ = sigterm.recv() => { + log::info!("Received sigterm - shutting down gracefully"); + let _ = sd_notify::notify(false, &[sd_notify::NotifyState::Stopping]); + abort_handle.abort(); + state.abort_all_active_builds(); + let _ = state.clear_gcroots(); + } + r = task => { + let _ = state.clear_gcroots(); + r??; + } + }; + Ok(()) +} diff --git a/src/hydra-queue-runner-v2/builder/src/state.rs b/src/hydra-queue-runner-v2/builder/src/state.rs new file mode 100644 index 000000000..a494a7c43 --- /dev/null +++ b/src/hydra-queue-runner-v2/builder/src/state.rs @@ -0,0 +1,807 @@ +use std::{ + sync::{Arc, atomic}, + time::Instant, +}; + +use ahash::AHashMap; +use anyhow::Context; +use futures::TryFutureExt as _; +use tonic::Request; +use tracing::Instrument; + +use crate::runner_v1::{BuildResultState, StepStatus, StepUpdate}; +use nix_utils::BaseStore as _; + +#[derive(thiserror::Error, Debug)] +#[allow(clippy::enum_variant_names)] +pub enum JobFailure { + #[error("Build failure: `{0}`")] + Build(anyhow::Error), + #[error("Preparing failure: `{0}`")] + Preparing(anyhow::Error), + #[error("Import failure: `{0}`")] + Import(anyhow::Error), + #[error("Upload failure: `{0}`")] + Upload(anyhow::Error), + #[error("Post processing failure: `{0}`")] + PostProcessing(anyhow::Error), +} + +impl From<JobFailure> for BuildResultState { + fn from(item: JobFailure) -> Self { + match item { + JobFailure::Build(_) => Self::BuildFailure, + JobFailure::Preparing(_) => Self::PreparingFailure, + JobFailure::Import(_) => Self::ImportFailure, + JobFailure::Upload(_) => Self::UploadFailure, + JobFailure::PostProcessing(_) => Self::PostProcessingFailure, + } + } +} + +pub struct BuildInfo { + handle: tokio::task::JoinHandle<()>, +} + +impl BuildInfo { + fn abort(&self) { + self.handle.abort(); + } +} + +pub struct Config { + pub ping_interval: u64, + pub speed_factor: f32, + pub max_jobs: u32, + pub tmp_avail_threshold: f32, + pub store_avail_threshold: f32, + pub load1_threshold: f32, + pub cpu_psi_threshold: f32, + pub mem_psi_threshold: f32, + pub io_psi_threshold: Option<f32>, + pub gcroots: std::path::PathBuf, + pub systems: Option<Vec<String>>, + pub supported_features: Option<Vec<String>>, + pub mandatory_features: Option<Vec<String>>, + pub use_substitutes: bool, +} + +pub struct State { + id: uuid::Uuid, + + active_builds: parking_lot::RwLock<AHashMap<nix_utils::StorePath, Arc<BuildInfo>>>, + + pub config: Config, + pub store: nix_utils::LocalStore, + + pub max_concurrent_downloads: atomic::AtomicU32, +} + +#[derive(Debug)] +struct Gcroot { + root: std::path::PathBuf, +} + +impl Gcroot { + pub fn new(path: std::path::PathBuf) -> std::io::Result<Self> { + std::fs::create_dir_all(&path)?; + Ok(Self { root: path }) + } +} + +impl std::fmt::Display for Gcroot { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> { + write!(f, "{}", self.root.display()) + } +} + +impl Drop for Gcroot { + fn drop(&mut self) { + if self.root.exists() { + let _ = std::fs::remove_dir_all(&self.root); + } + } +} + +impl State { + pub fn new(args: super::config::Args) -> anyhow::Result<Arc<Self>> { + let store = nix_utils::LocalStore::init(); + nix_utils::set_verbosity(1); + + let logname = std::env::var("LOGNAME").context("LOGNAME not set")?; + let nix_state_dir = std::env::var("NIX_STATE_DIR").unwrap_or("/nix/var/nix/".to_owned()); + let gcroots =
std::path::PathBuf::from(nix_state_dir) + .join("gcroots/per-user") + .join(logname) + .join("hydra-roots"); + std::fs::create_dir_all(&gcroots)?; + + Ok(Arc::new(Self { + id: uuid::Uuid::new_v4(), + active_builds: parking_lot::RwLock::new(AHashMap::new()), + config: Config { + ping_interval: args.ping_interval, + speed_factor: args.speed_factor, + max_jobs: args.max_jobs, + tmp_avail_threshold: args.tmp_avail_threshold, + store_avail_threshold: args.store_avail_threshold, + load1_threshold: args.load1_threshold, + cpu_psi_threshold: args.cpu_psi_threshold, + mem_psi_threshold: args.mem_psi_threshold, + io_psi_threshold: args.io_psi_threshold, + gcroots, + systems: args.systems, + supported_features: args.supported_features, + mandatory_features: args.mandatory_features, + use_substitutes: args.use_substitutes, + }, + store, + max_concurrent_downloads: 5.into(), + })) + } + + #[tracing::instrument(skip(self), err)] + pub async fn get_join_message(&self) -> anyhow::Result<crate::runner_v1::JoinMessage> { + let sys = crate::system::BaseSystemInfo::new()?; + + let join = crate::runner_v1::JoinMessage { + machine_id: self.id.to_string(), + systems: if let Some(s) = &self.config.systems { + s.clone() + } else { + let mut out = Vec::with_capacity(8); + out.push(nix_utils::get_this_system()); + out.extend(nix_utils::get_extra_platforms()); + out + }, + hostname: gethostname::gethostname() + .into_string() + .map_err(|_| anyhow::anyhow!("Couldn't convert hostname to string"))?, + cpu_count: u32::try_from(sys.cpu_count)?, + bogomips: sys.bogomips, + speed_factor: self.config.speed_factor, + max_jobs: self.config.max_jobs, + tmp_avail_threshold: self.config.tmp_avail_threshold, + store_avail_threshold: self.config.store_avail_threshold, + load1_threshold: self.config.load1_threshold, + cpu_psi_threshold: self.config.cpu_psi_threshold, + mem_psi_threshold: self.config.mem_psi_threshold, + io_psi_threshold: self.config.io_psi_threshold, + total_mem: sys.total_memory, + supported_features: if let Some(s) = &self.config.supported_features { + s.clone() + } else { + nix_utils::get_system_features() + }, + mandatory_features: self.config.mandatory_features.clone().unwrap_or_default(), + cgroups: nix_utils::get_use_cgroups(), + }; + + log::info!("Builder systems={:?}", join.systems); + log::info!("Builder supported_features={:?}", join.supported_features); + log::info!("Builder mandatory_features={:?}", join.mandatory_features); + log::info!("Builder use_cgroups={:?}", join.cgroups); + + Ok(join) + } + + #[tracing::instrument(skip(self), err)] + pub fn get_ping_message(&self) -> anyhow::Result<crate::runner_v1::PingMessage> { + let sysinfo = crate::system::SystemLoad::new()?; + + Ok(crate::runner_v1::PingMessage { + machine_id: self.id.to_string(), + load1: sysinfo.load_avg_1, + load5: sysinfo.load_avg_5, + load15: sysinfo.load_avg_15, + mem_usage: sysinfo.mem_usage, + pressure: sysinfo.pressure.map(|p| crate::runner_v1::PressureState { + cpu_some: p.cpu_some.map(Into::into), + mem_some: p.mem_some.map(Into::into), + mem_full: p.mem_full.map(Into::into), + io_some: p.io_some.map(Into::into), + io_full: p.io_full.map(Into::into), + irq_full: p.irq_full.map(Into::into), + }), + tmp_free_percent: sysinfo.tmp_free_percent, + store_free_percent: sysinfo.store_free_percent, + }) + } + + #[tracing::instrument(skip(self, client, m), fields(drv=%m.drv))] + pub fn schedule_build( + self: Arc<Self>, + mut client: crate::runner_v1::runner_service_client::RunnerServiceClient< + tonic::transport::Channel, + >, + m: crate::runner_v1::BuildMessage, + ) { + let drv =
nix_utils::StorePath::new(&m.drv); + if self.contains_build(&drv) { + return; + } + log::info!("Building {drv}"); + + let task_handle = tokio::spawn({ + let self_ = self.clone(); + let drv = drv.clone(); + async move { + let mut import_elapsed = std::time::Duration::from_millis(0); + let mut build_elapsed = std::time::Duration::from_millis(0); + match self_ + .process_build(client.clone(), m, &mut import_elapsed, &mut build_elapsed) + .await + { + Ok(()) => { + log::info!("Successfully completed build process for {drv}"); + self_.remove_build(&drv); + } + Err(e) => { + log::error!("Build of {drv} failed with {e}"); + self_.remove_build(&drv); + let failed_build = crate::runner_v1::BuildResultInfo { + machine_id: self_.id.to_string(), + drv: drv.into_base_name(), + import_time_ms: u64::try_from(import_elapsed.as_millis()) + .unwrap_or_default(), + build_time_ms: u64::try_from(build_elapsed.as_millis()) + .unwrap_or_default(), + result_state: BuildResultState::from(e) as i32, + outputs: vec![], + nix_support: None, + }; + + for i in 0..3 { + match client.complete_build(failed_build.clone()).await { + Ok(_) => break, + Err(e) => { + if i == 2 { + log::error!("Failed to submit build failure info: {e}"); + } else { + log::error!( + "Failed to submit build failure info (retrying ... i={i}): {e}" + ); + // TODO: backoff + tokio::time::sleep(tokio::time::Duration::from_secs(1)) + .await; + } + } + } + } + } + } + } + }); + + self.insert_new_build( + drv, + BuildInfo { + handle: task_handle, + }, + ); + } + + fn contains_build(&self, drv: &nix_utils::StorePath) -> bool { + let active = self.active_builds.read(); + active.contains_key(drv) + } + + fn insert_new_build(&self, drv: nix_utils::StorePath, b: BuildInfo) { + { + let mut active = self.active_builds.write(); + active.insert(drv, Arc::new(b)); + } + self.publish_builds_to_sd_notify(); + } + + fn remove_build(&self, drv: &nix_utils::StorePath) -> Option<Arc<BuildInfo>> { + let b = { + let mut active = self.active_builds.write(); + active.remove(drv) + }; + self.publish_builds_to_sd_notify(); + b + } + + #[tracing::instrument(skip(self, m), fields(drv=%m.drv))] + pub fn abort_build(&self, m: &crate::runner_v1::AbortMessage) { + if let Some(b) = self.remove_build(&nix_utils::StorePath::new(&m.drv)) { + b.abort(); + } + } + + pub fn abort_all_active_builds(&self) { + let mut active = self.active_builds.write(); + for b in active.values() { + b.abort(); + } + active.clear(); + } + + #[tracing::instrument(skip(self, client, m), fields(drv=%m.drv), err)] + #[allow(clippy::too_many_lines)] + async fn process_build( + &self, + mut client: crate::runner_v1::runner_service_client::RunnerServiceClient< + tonic::transport::Channel, + >, + m: crate::runner_v1::BuildMessage, + import_elapsed: &mut std::time::Duration, + build_elapsed: &mut std::time::Duration, + ) -> Result<(), JobFailure> { + // we don't use anyhow here because we manually need to write the correct build status + // to the queue runner.
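+ // Each JobFailure variant maps to a BuildResultState via the From impl above, so the + // queue runner can record in which phase (preparing, import, build, upload, post-processing) a step failed.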
+ use tokio_stream::StreamExt as _; + + let machine_id = self.id; + let drv = nix_utils::StorePath::new(&m.drv); + let resolved_drv = m + .resolved_drv + .as_ref() + .map(|v| nix_utils::StorePath::new(v)); + + let before_import = Instant::now(); + let gcroot_prefix = uuid::Uuid::new_v4().to_string(); + let gcroot = self + .get_gcroot(&gcroot_prefix) + .map_err(|e| JobFailure::Preparing(e.into()))?; + + let _ = client // we ignore the error here, as this step status has no prio + .build_step_update(StepUpdate { + machine_id: machine_id.to_string(), + drv: drv.base_name().to_owned(), + step_status: StepStatus::SeningInputs as i32, + }) + .await; + let requisites = client + .fetch_drv_requisites(crate::runner_v1::FetchRequisitesRequest { + path: resolved_drv.as_ref().unwrap_or(&drv).base_name().to_owned(), + include_outputs: false, + }) + .await + .map_err(|e| JobFailure::Import(e.into()))? + .into_inner() + .requisites; + + import_requisites( + &mut client, + self.store.clone(), + &gcroot, + resolved_drv.as_ref().unwrap_or(&drv), + requisites + .into_iter() + .map(|s| nix_utils::StorePath::new(&s)), + usize::try_from( + self.max_concurrent_downloads + .load(atomic::Ordering::Relaxed), + ) + .unwrap_or(5), + self.config.use_substitutes, + ) + .await + .map_err(JobFailure::Import)?; + *import_elapsed = before_import.elapsed(); + + // Resolved drv and drv output paths are the same + let drv_info = nix_utils::query_drv(&drv) + .await + .map_err(|e| JobFailure::Import(e.into()))? + .ok_or(JobFailure::Import(anyhow::anyhow!("drv not found")))?; + + let _ = client // we ignore the error here, as this step status has no prio + .build_step_update(StepUpdate { + machine_id: machine_id.to_string(), + drv: drv.base_name().to_owned(), + step_status: StepStatus::Building as i32, + }) + .await; + let before_build = Instant::now(); + let (mut child, mut log_output) = nix_utils::realise_drv( + resolved_drv.as_ref().unwrap_or(&drv), + &nix_utils::BuildOptions::complete(m.max_log_size, m.max_silent_time, m.build_timeout), + true, + ) + .await + .map_err(|e| JobFailure::Build(e.into()))?; + let drv2 = drv.clone(); + let log_stream = async_stream::stream! 
{ + while let Some(chunk) = log_output.next().await { + match chunk { + Ok(chunk) => yield crate::runner_v1::LogChunk { + drv: drv2.base_name().to_owned(), + data: format!("{chunk}\n").into(), + }, + Err(e) => { + log::error!("Failed to write log chunk to queue-runner: {e}"); + break + } + } + } + }; + client + .build_log(Request::new(log_stream)) + .await + .map_err(|e| JobFailure::Build(e.into()))?; + let output_paths = drv_info + .outputs + .iter() + .filter_map(|o| o.path.clone()) + .collect::<Vec<_>>(); + nix_utils::validate_statuscode( + child + .wait() + .await + .map_err(|e| JobFailure::Build(e.into()))?, + ) + .map_err(|e| JobFailure::Build(e.into()))?; + for o in &output_paths { + nix_utils::add_root(&gcroot.root, o); + } + + *build_elapsed = before_build.elapsed(); + log::info!("Finished building {drv}"); + + let _ = client // we ignore the error here, as this step status has no prio + .build_step_update(StepUpdate { + machine_id: machine_id.to_string(), + drv: drv.base_name().to_owned(), + step_status: StepStatus::ReceivingOutputs as i32, + }) + .await; + upload_nars(client.clone(), self.store.clone(), output_paths) + .await + .map_err(JobFailure::Upload)?; + + let _ = client // we ignore the error here, as this step status has no prio + .build_step_update(StepUpdate { + machine_id: machine_id.to_string(), + drv: drv.base_name().to_owned(), + step_status: StepStatus::PostProcessing as i32, + }) + .await; + let build_results = new_success_build_result_info( + self.store.clone(), + machine_id, + &drv, + drv_info, + *import_elapsed, + *build_elapsed, + ) + .await + .map_err(JobFailure::PostProcessing)?; + // This part is awkward: if writing the result doesn't work, we try to write a failure, maybe that works + client + .complete_build(build_results) + .await + .map_err(|e| JobFailure::PostProcessing(e.into()))?; + + Ok(()) + } + + #[tracing::instrument(skip(self), err)] + fn get_gcroot(&self, prefix: &str) -> std::io::Result<Gcroot> { + Gcroot::new(self.config.gcroots.join(prefix)) + } + + #[tracing::instrument(skip(self))] + fn publish_builds_to_sd_notify(&self) { + let active = { + let builds = self.active_builds.read(); + builds + .keys() + .map(|b| b.base_name().to_owned()) + .collect::<Vec<_>>() + }; + + let _notify = sd_notify::notify( + false, + &[ + sd_notify::NotifyState::Status(&if active.is_empty() { + "Building 0 drvs".into() + } else { + format!("Building {} drvs: {}", active.len(), active.join(", ")) + }), + sd_notify::NotifyState::Ready, + ], + ); + } + + pub fn clear_gcroots(&self) -> std::io::Result<()> { + std::fs::remove_dir_all(&self.config.gcroots)?; + std::fs::create_dir_all(&self.config.gcroots)?; + Ok(()) + } +} + +#[tracing::instrument(fields(%gcroot, %path))] +async fn filter_missing( + gcroot: &Gcroot, + path: nix_utils::StorePath, +) -> Option<nix_utils::StorePath> { + if nix_utils::check_if_storepath_exists(&path).await { + nix_utils::add_root(&gcroot.root, &path); + None + } else { + Some(path) + } +} + +async fn substitute_paths( + paths: &[&nix_utils::StorePath], + build_opts: &nix_utils::BuildOptions, +) -> anyhow::Result<()> { + let (mut child, _) = nix_utils::realise_drvs(paths, build_opts, false).await?; + nix_utils::validate_statuscode(child.wait().await?)?; + Ok(()) +} + +#[tracing::instrument(skip(client, store), fields(%gcroot), err)] +async fn import_paths( + mut client: crate::runner_v1::runner_service_client::RunnerServiceClient< + tonic::transport::Channel, + >, + store: nix_utils::LocalStore, + gcroot: &Gcroot, + paths: Vec<nix_utils::StorePath>, + filter: bool, + use_substitutes: Option<&nix_utils::BuildOptions>,
+) -> anyhow::Result<()> { + use futures::StreamExt as _; + + let paths = if filter { + futures::StreamExt::map(tokio_stream::iter(paths), |p| filter_missing(gcroot, p)) + .buffered(10) + .filter_map(|o| async { o }) + .collect::<Vec<_>>() + .await + } else { + paths + }; + let paths = if let Some(build_opts) = use_substitutes { + // we can ignore the error + let _ = substitute_paths(&paths.iter().collect::<Vec<_>>(), build_opts).await; + let paths = + futures::StreamExt::map(tokio_stream::iter(paths), |p| filter_missing(gcroot, p)) + .buffered(10) + .filter_map(|o| async { o }) + .collect::<Vec<_>>() + .await; + if paths.is_empty() { + return Ok(()); + } + paths + } else { + paths + }; + + log::debug!("Start importing paths"); + let stream = client + .stream_files(crate::runner_v1::StorePaths { + paths: paths.iter().map(|p| p.base_name().to_owned()).collect(), + }) + .await? + .into_inner(); + + store + .import_paths( + tokio_stream::StreamExt::map(stream, |s| { + s.map(|v| v.chunk.into()) + .map_err(|e| std::io::Error::new(std::io::ErrorKind::UnexpectedEof, e)) + }), + false, + ) + .await?; + log::debug!("Finished importing paths"); + + for p in paths { + nix_utils::add_root(&gcroot.root, &p); + } + Ok(()) +} + +#[tracing::instrument(skip(client, store, requisites), fields(%gcroot, %drv), err)] +async fn import_requisites<T: Iterator<Item = nix_utils::StorePath>>( + client: &mut crate::runner_v1::runner_service_client::RunnerServiceClient< + tonic::transport::Channel, + >, + store: nix_utils::LocalStore, + gcroot: &Gcroot, + drv: &nix_utils::StorePath, + requisites: T, + max_concurrent_downloads: usize, + use_substitutes: bool, +) -> anyhow::Result<()> { + use futures::stream::StreamExt as _; + + let requisites = futures::StreamExt::map(tokio_stream::iter(requisites), |p| { + filter_missing(gcroot, p) + }) + .buffered(50) + .filter_map(|o| async { o }) + .collect::<Vec<_>>() + .await; + + let use_substitutes = if use_substitutes { + Some(nix_utils::BuildOptions::substitute_only()) + } else { + None + }; + + let (input_drvs, input_srcs): (Vec<_>, Vec<_>) = requisites + .into_iter() + .partition(nix_utils::StorePath::is_drv); + + for srcs in input_srcs.chunks(max_concurrent_downloads) { + import_paths( + client.clone(), + store.clone(), + gcroot, + srcs.to_vec(), + true, + use_substitutes.as_ref(), + ) + .await?; + } + + for drvs in input_drvs.chunks(max_concurrent_downloads) { + import_paths( + client.clone(), + store.clone(), + gcroot, + drvs.to_vec(), + true, + None, // never use substitute for drvs + ) + .await?; + } + + let full_requisites = client + .clone() + .fetch_drv_requisites(crate::runner_v1::FetchRequisitesRequest { + path: drv.base_name().to_owned(), + include_outputs: true, + }) + .await?
+ .into_inner() + .requisites + .into_iter() + .map(|s| nix_utils::StorePath::new(&s)) + .collect::<Vec<_>>(); + let full_requisites = futures::StreamExt::map(tokio_stream::iter(full_requisites), |p| { + filter_missing(gcroot, p) + }) + .buffered(50) + .filter_map(|o| async { o }) + .collect::<Vec<_>>() + .await; + + for other in full_requisites.chunks(max_concurrent_downloads) { + // we can skip filtering here as we have already done that + import_paths( + client.clone(), + store.clone(), + gcroot, + other.to_vec(), + false, + use_substitutes.as_ref(), + ) + .await?; + } + + Ok(()) +} + +#[tracing::instrument(skip(client, store), err)] +async fn upload_nars( + mut client: crate::runner_v1::runner_service_client::RunnerServiceClient< + tonic::transport::Channel, + >, + store: nix_utils::LocalStore, + nars: Vec<nix_utils::StorePath>, +) -> anyhow::Result<()> { + log::debug!("Start uploading paths"); + let (tx, rx) = tokio::sync::mpsc::unbounded_channel::<crate::runner_v1::NarData>(); + let closure = move |data: &[u8]| { + let data = Vec::from(data); + tx.send(crate::runner_v1::NarData { chunk: data }).is_ok() + }; + let a = client + .build_result(tokio_stream::wrappers::UnboundedReceiverStream::new(rx)) + .map_err(Into::<anyhow::Error>::into); + + let b = tokio::task::spawn_blocking(move || { + async move { + store.export_paths(&nars, closure)?; + log::debug!("Finished exporting paths"); + Ok::<(), anyhow::Error>(()) + } + .in_current_span() + }) + .await? + .map_err(Into::<anyhow::Error>::into); + futures::future::try_join(a, b).await?; + log::debug!("Finished uploading paths"); + Ok(()) +} + +#[tracing::instrument(skip(store, drv_info), fields(%drv), ret(level = tracing::Level::DEBUG), err)] +async fn new_success_build_result_info( + store: nix_utils::LocalStore, + machine_id: uuid::Uuid, + drv: &nix_utils::StorePath, + drv_info: nix_utils::Derivation, + import_elapsed: std::time::Duration, + build_elapsed: std::time::Duration, +) -> anyhow::Result<crate::runner_v1::BuildResultInfo> { + let outputs = &drv_info + .outputs + .iter() + .filter_map(|o| o.path.as_ref()) + .collect::<Vec<_>>(); + let pathinfos = store.query_path_infos(outputs); + + let nix_support = shared::parse_nix_support_from_outputs(&drv_info.outputs).await?; + Ok(crate::runner_v1::BuildResultInfo { + machine_id: machine_id.to_string(), + drv: drv.base_name().to_owned(), + import_time_ms: u64::try_from(import_elapsed.as_millis())?, + build_time_ms: u64::try_from(build_elapsed.as_millis())?, + result_state: BuildResultState::Success as i32, + outputs: drv_info + .outputs + .into_iter() + .map(|o| crate::runner_v1::Output { + output: Some(match o.path { + Some(p) => { + if let Some(info) = pathinfos.get(&p) { + crate::runner_v1::output::Output::Withpath( + crate::runner_v1::OutputWithPath { + name: o.name, + closure_size: store.compute_closure_size(&p), + path: p.into_base_name(), + nar_size: info.nar_size, + nar_hash: info.nar_hash.clone(), + }, + ) + } else { + crate::runner_v1::output::Output::Nameonly( + crate::runner_v1::OutputNameOnly { name: o.name }, + ) + } + } + None => crate::runner_v1::output::Output::Nameonly( + crate::runner_v1::OutputNameOnly { name: o.name }, + ), + }), + }) + .collect(), + nix_support: Some(crate::runner_v1::NixSupport { + metrics: nix_support + .metrics + .into_iter() + .map(|m| crate::runner_v1::BuildMetric { + path: m.path, + name: m.name, + unit: m.unit, + value: m.value, + }) + .collect(), + failed: nix_support.failed, + hydra_release_name: nix_support.hydra_release_name, + products: nix_support + .products + .into_iter() + .map(|p| crate::runner_v1::BuildProduct { + path: p.path, + default_path: p.default_path, + r#type:
p.r#type, + subtype: p.subtype, + name: p.name, + is_regular: p.is_regular, + sha256hash: p.sha256hash, + file_size: p.file_size, + }) + .collect(), + }), + }) +} diff --git a/src/hydra-queue-runner-v2/builder/src/system.rs b/src/hydra-queue-runner-v2/builder/src/system.rs new file mode 100644 index 000000000..890c0541b --- /dev/null +++ b/src/hydra-queue-runner-v2/builder/src/system.rs @@ -0,0 +1,219 @@ +use procfs_core::FromRead as _; + +pub struct BaseSystemInfo { + pub cpu_count: usize, + pub bogomips: f32, + pub total_memory: u64, +} + +impl BaseSystemInfo { + #[cfg(target_os = "linux")] + pub fn new() -> anyhow::Result<Self> { + let cpuinfo = procfs_core::CpuInfo::from_file("/proc/cpuinfo")?; + let meminfo = procfs_core::Meminfo::from_file("/proc/meminfo")?; + let bogomips = cpuinfo + .fields + .get("bogomips") + .and_then(|v| v.parse::<f32>().ok()) + .unwrap_or(0.0); + + Ok(Self { + cpu_count: cpuinfo.num_cores(), + bogomips, + total_memory: meminfo.mem_total, + }) + } + + #[cfg(target_os = "macos")] + pub fn new() -> anyhow::Result<Self> { + let mut sys = sysinfo::System::new_all(); + sys.refresh_memory(); + sys.refresh_cpu_all(); + + Ok(Self { + cpu_count: sys.cpus().len(), + bogomips: 0.0, + total_memory: sys.total_memory(), + }) + } +} + +pub struct Pressure { + pub avg10: f32, + pub avg60: f32, + pub avg300: f32, + pub total: u64, +} + +#[cfg(target_os = "linux")] +impl Pressure { + fn new(record: &procfs_core::PressureRecord) -> Self { + Self { + avg10: record.avg10, + avg60: record.avg60, + avg300: record.avg300, + total: record.total, + } + } +} + +impl From<Pressure> for crate::runner_v1::Pressure { + fn from(val: Pressure) -> Self { + Self { + avg10: val.avg10, + avg60: val.avg60, + avg300: val.avg300, + total: val.total, + } + } +} + +pub struct PressureState { + pub cpu_some: Option<Pressure>, + pub mem_some: Option<Pressure>, + pub mem_full: Option<Pressure>, + pub io_some: Option<Pressure>, + pub io_full: Option<Pressure>, + pub irq_full: Option<Pressure>, +} + +// TODO: remove once https://github.com/eminence/procfs/issues/351 is resolved +// The next 3 functions are copied from https://github.com/eminence/procfs/blob/v0.17.0/procfs-core/src/pressure.rs#L93 +// LICENSE is Apache2.0/MIT +#[cfg(target_os = "linux")] +fn get_f32( + map: &std::collections::HashMap<&str, &str>, + value: &str, +) -> procfs_core::ProcResult<f32> { + map.get(value).map_or_else( + || Err(procfs_core::ProcError::Incomplete(None)), + |v| { + v.parse::<f32>() + .map_err(|_| procfs_core::ProcError::Incomplete(None)) + }, + ) +} + +#[cfg(target_os = "linux")] +fn get_total(map: &std::collections::HashMap<&str, &str>) -> procfs_core::ProcResult<u64> { + map.get("total").map_or_else( + || Err(procfs_core::ProcError::Incomplete(None)), + |v| { + v.parse::<u64>() + .map_err(|_| procfs_core::ProcError::Incomplete(None)) + }, + ) +} + +#[cfg(target_os = "linux")] +fn parse_pressure_record(line: &str) -> procfs_core::ProcResult<procfs_core::PressureRecord> { + let mut parsed = std::collections::HashMap::new(); + + if !line.starts_with("some") && !line.starts_with("full") { + return Err(procfs_core::ProcError::Incomplete(None)); + } + + let values = &line[5..]; + + for kv_str in values.split_whitespace() { + let kv_split = kv_str.split('='); + let vec: Vec<&str> = kv_split.collect(); + if vec.len() == 2 { + parsed.insert(vec[0], vec[1]); + } + } + + Ok(procfs_core::PressureRecord { + avg10: get_f32(&parsed, "avg10")?, + avg60: get_f32(&parsed, "avg60")?, + avg300: get_f32(&parsed, "avg300")?, + total: get_total(&parsed)?, + }) +} + +#[cfg(target_os = "linux")] +impl PressureState { + pub fn new() -> Option<Self> { + if
diff --git a/src/hydra-queue-runner-v2/builder/src/system.rs b/src/hydra-queue-runner-v2/builder/src/system.rs
new file mode 100644
index 000000000..890c0541b
--- /dev/null
+++ b/src/hydra-queue-runner-v2/builder/src/system.rs
@@ -0,0 +1,219 @@
+use procfs_core::FromRead as _;
+
+pub struct BaseSystemInfo {
+    pub cpu_count: usize,
+    pub bogomips: f32,
+    pub total_memory: u64,
+}
+
+impl BaseSystemInfo {
+    #[cfg(target_os = "linux")]
+    pub fn new() -> anyhow::Result<Self> {
+        let cpuinfo = procfs_core::CpuInfo::from_file("/proc/cpuinfo")?;
+        let meminfo = procfs_core::Meminfo::from_file("/proc/meminfo")?;
+        let bogomips = cpuinfo
+            .fields
+            .get("bogomips")
+            .and_then(|v| v.parse::<f32>().ok())
+            .unwrap_or(0.0);
+
+        Ok(Self {
+            cpu_count: cpuinfo.num_cores(),
+            bogomips,
+            total_memory: meminfo.mem_total,
+        })
+    }
+
+    #[cfg(target_os = "macos")]
+    pub fn new() -> anyhow::Result<Self> {
+        let mut sys = sysinfo::System::new_all();
+        sys.refresh_memory();
+        sys.refresh_cpu_all();
+
+        Ok(Self {
+            cpu_count: sys.cpus().len(),
+            bogomips: 0.0,
+            total_memory: sys.total_memory(),
+        })
+    }
+}
+
+pub struct Pressure {
+    pub avg10: f32,
+    pub avg60: f32,
+    pub avg300: f32,
+    pub total: u64,
+}
+
+#[cfg(target_os = "linux")]
+impl Pressure {
+    fn new(record: &procfs_core::PressureRecord) -> Self {
+        Self {
+            avg10: record.avg10,
+            avg60: record.avg60,
+            avg300: record.avg300,
+            total: record.total,
+        }
+    }
+}
+
+impl From<Pressure> for crate::runner_v1::Pressure {
+    fn from(val: Pressure) -> Self {
+        Self {
+            avg10: val.avg10,
+            avg60: val.avg60,
+            avg300: val.avg300,
+            total: val.total,
+        }
+    }
+}
+
+pub struct PressureState {
+    pub cpu_some: Option<Pressure>,
+    pub mem_some: Option<Pressure>,
+    pub mem_full: Option<Pressure>,
+    pub io_some: Option<Pressure>,
+    pub io_full: Option<Pressure>,
+    pub irq_full: Option<Pressure>,
+}
+
+// TODO: remove once https://github.com/eminence/procfs/issues/351 is resolved
+// Next 3 functions are copied from https://github.com/eminence/procfs/blob/v0.17.0/procfs-core/src/pressure.rs#L93
+// LICENSE is Apache2.0/MIT
+#[cfg(target_os = "linux")]
+fn get_f32(
+    map: &std::collections::HashMap<&str, &str>,
+    value: &str,
+) -> procfs_core::ProcResult<f32> {
+    map.get(value).map_or_else(
+        || Err(procfs_core::ProcError::Incomplete(None)),
+        |v| {
+            v.parse::<f32>()
+                .map_err(|_| procfs_core::ProcError::Incomplete(None))
+        },
+    )
+}
+
+#[cfg(target_os = "linux")]
+fn get_total(map: &std::collections::HashMap<&str, &str>) -> procfs_core::ProcResult<u64> {
+    map.get("total").map_or_else(
+        || Err(procfs_core::ProcError::Incomplete(None)),
+        |v| {
+            v.parse::<u64>()
+                .map_err(|_| procfs_core::ProcError::Incomplete(None))
+        },
+    )
+}
+
+#[cfg(target_os = "linux")]
+fn parse_pressure_record(line: &str) -> procfs_core::ProcResult<procfs_core::PressureRecord> {
+    let mut parsed = std::collections::HashMap::new();
+
+    if !line.starts_with("some") && !line.starts_with("full") {
+        return Err(procfs_core::ProcError::Incomplete(None));
+    }
+
+    let values = &line[5..];
+
+    for kv_str in values.split_whitespace() {
+        let kv_split = kv_str.split('=');
+        let vec: Vec<&str> = kv_split.collect();
+        if vec.len() == 2 {
+            parsed.insert(vec[0], vec[1]);
+        }
+    }
+
+    Ok(procfs_core::PressureRecord {
+        avg10: get_f32(&parsed, "avg10")?,
+        avg60: get_f32(&parsed, "avg60")?,
+        avg300: get_f32(&parsed, "avg300")?,
+        total: get_total(&parsed)?,
+    })
+}
+
+#[cfg(target_os = "linux")]
+impl PressureState {
+    pub fn new() -> Option<Self> {
+        if !std::fs::exists("/proc/pressure").unwrap_or_default() {
+            return None;
+        }
+
+        let cpu_psi = procfs_core::CpuPressure::from_file("/proc/pressure/cpu").ok();
+        let mem_psi = procfs_core::MemoryPressure::from_file("/proc/pressure/memory").ok();
+        let io_psi = procfs_core::IoPressure::from_file("/proc/pressure/io").ok();
+        let irq_psi_full = std::fs::read_to_string("/proc/pressure/irq")
+            .ok()
+            .and_then(|v| parse_pressure_record(&v).ok());
+
+        Some(Self {
+            cpu_some: cpu_psi.map(|v| Pressure::new(&v.some)),
+            mem_some: mem_psi.as_ref().map(|v| Pressure::new(&v.some)),
+            mem_full: mem_psi.map(|v| Pressure::new(&v.full)),
+            io_some: io_psi.as_ref().map(|v| Pressure::new(&v.some)),
+            io_full: io_psi.map(|v| Pressure::new(&v.full)),
+            irq_full: irq_psi_full.map(|v| Pressure::new(&v)),
+        })
+    }
+}
+
+pub struct SystemLoad {
+    pub load_avg_1: f32,
+    pub load_avg_5: f32,
+    pub load_avg_15: f32,
+
+    pub mem_usage: u64,
+    pub pressure: Option<PressureState>,
+
+    pub tmp_free_percent: f64,
+    pub store_free_percent: f64,
+}
+
+pub fn get_mount_free_percent(dest: &str) -> anyhow::Result<f64> {
+    let stat = nix::sys::statvfs::statvfs(dest)?;
+
+    let total_bytes = (stat.blocks() as u64) * stat.block_size();
+    let free_bytes = (stat.blocks_available() as u64) * stat.block_size();
+    #[allow(clippy::cast_precision_loss)]
+    Ok(free_bytes as f64 / total_bytes as f64 * 100.0)
+}
+
+impl SystemLoad {
+    #[cfg(target_os = "linux")]
+    pub fn new() -> anyhow::Result<Self> {
+        let meminfo = procfs_core::Meminfo::from_file("/proc/meminfo")?;
+        let load = procfs_core::LoadAverage::from_file("/proc/loadavg")?;
+
+        // TODO: prefix
+        let nix_store_dir = std::env::var("NIX_STORE_DIR").unwrap_or("/nix/store".to_owned());
+
+        Ok(Self {
+            load_avg_1: load.one,
+            load_avg_5: load.five,
+            load_avg_15: load.fifteen,
+            mem_usage: meminfo.mem_total - meminfo.mem_available.unwrap_or(0),
+            pressure: PressureState::new(),
+            tmp_free_percent: get_mount_free_percent("/tmp").unwrap_or(0.),
+            store_free_percent: get_mount_free_percent(&nix_store_dir).unwrap_or(0.),
+        })
+    }
+
+    #[cfg(target_os = "macos")]
+    pub fn new() -> anyhow::Result<Self> {
+        let mut sys = sysinfo::System::new_all();
+        sys.refresh_memory();
+        let load = sysinfo::System::load_average();
+
+        // TODO: prefix
+        let nix_store_dir = std::env::var("NIX_STORE_DIR").unwrap_or("/nix/store".to_owned());
+
+        Ok(Self {
+            load_avg_1: load.one as f32,
+            load_avg_5: load.five as f32,
+            load_avg_15: load.fifteen as f32,
+            mem_usage: sys.used_memory(),
+            pressure: None,
+            tmp_free_percent: get_mount_free_percent("/tmp").unwrap_or(0.),
+            store_free_percent: get_mount_free_percent(&nix_store_dir).unwrap_or(0.),
+        })
+    }
+}
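For reference, a `/proc/pressure/*` file contains lines like `some avg10=0.05 avg60=0.02 avg300=0.00 total=123456`, which is exactly what the vendored `parse_pressure_record` splits apart. The statvfs helper can likewise be exercised on its own; a hypothetical caller (assuming `get_mount_free_percent` is in scope, e.g. from a test in this module):

```rust
// Hypothetical caller for get_mount_free_percent. Note that
// blocks_available() counts blocks usable by unprivileged processes, so the
// result can sit below the raw free-space figure when a root reserve exists.
fn main() -> anyhow::Result<()> {
    for mount in ["/tmp", "/nix/store"] {
        match get_mount_free_percent(mount) {
            Ok(pct) => println!("{mount}: {pct:.1}% free"),
            Err(e) => eprintln!("{mount}: statvfs failed: {e}"),
        }
    }
    Ok(())
}
```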
diff --git a/src/hydra-queue-runner-v2/clippy.toml b/src/hydra-queue-runner-v2/clippy.toml
new file mode 100644
index 000000000..8d656e139
--- /dev/null
+++ b/src/hydra-queue-runner-v2/clippy.toml
@@ -0,0 +1,4 @@
+ignore-interior-mutability = [
+  "bytes::Bytes",
+  "queue_runner::build::Step",
+]
diff --git a/src/hydra-queue-runner-v2/crates/db/Cargo.toml b/src/hydra-queue-runner-v2/crates/db/Cargo.toml
new file mode 100644
index 000000000..5020c6bf0
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/db/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "db"
+version = "0.1.0"
+edition = "2024"
+license = "GPL-3.0"
+
+[dependencies]
+tracing = "0.1"
+anyhow = "1.0.98"
+futures = "0.3"
+ahash = "0.8.11"
+
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+chrono = { version = "0.4.38", default-features = false, features = [
+  "clock",
+  "std",
+  "serde",
+] }
+
+sqlx = { version = "0.8", features = [
+  "runtime-tokio",
+  "tls-native-tls",
+  "postgres",
+  "chrono",
+] }
diff --git a/src/hydra-queue-runner-v2/crates/db/src/connection.rs b/src/hydra-queue-runner-v2/crates/db/src/connection.rs
new file mode 100644
index 000000000..8c3ed6f80
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/db/src/connection.rs
@@ -0,0 +1,989 @@
+use sqlx::Acquire;
+
+use super::models::{
+    Build, BuildSmall, BuildStatus, BuildSteps, InsertBuildMetric, InsertBuildProduct,
+    InsertBuildStep, InsertBuildStepOutput, Jobset, UpdateBuild, UpdateBuildStep,
+    UpdateBuildStepInFinish,
+};
+
+pub struct Connection {
+    conn: sqlx::pool::PoolConnection<sqlx::Postgres>,
+}
+
+pub struct Transaction<'a> {
+    tx: sqlx::PgTransaction<'a>,
+}
+
+impl Connection {
+    pub fn new(conn: sqlx::pool::PoolConnection<sqlx::Postgres>) -> Self {
+        Self { conn }
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn begin_transaction(&mut self) -> sqlx::Result<Transaction<'_>> {
+        let tx = self.conn.begin().await?;
+        Ok(Transaction { tx })
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_not_finished_builds_fast(&mut self) -> sqlx::Result<Vec<BuildSmall>> {
+        sqlx::query_as!(
+            BuildSmall,
+            r#"
+            SELECT
+                id,
+                globalPriority
+            FROM builds
+            WHERE finished = 0;"#
+        )
+        .fetch_all(&mut *self.conn)
+        .await
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_not_finished_builds(&mut self) -> sqlx::Result<Vec<Build>> {
+        sqlx::query_as!(
+            Build,
+            r#"
+            SELECT
+                builds.id,
+                builds.jobset_id,
+                jobsets.project as project,
+                jobsets.name as jobset,
+                job,
+                drvPath,
+                maxsilent,
+                timeout,
+                timestamp,
+                globalPriority,
+                priority
+            FROM builds
+            INNER JOIN jobsets ON builds.jobset_id = jobsets.id
+            WHERE finished = 0 ORDER BY globalPriority desc, schedulingshares, random();"#
+        )
+        .fetch_all(&mut *self.conn)
+        .await
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_jobsets(&mut self) -> sqlx::Result<Vec<Jobset>> {
+        sqlx::query_as!(
+            Jobset,
+            r#"
+            SELECT
+                project,
+                name,
+                schedulingshares
+            FROM jobsets"#
+        )
+        .fetch_all(&mut *self.conn)
+        .await
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_jobset_scheduling_shares(
+        &mut self,
+        jobset_id: i32,
+    ) -> sqlx::Result<Option<i32>> {
+        Ok(sqlx::query!(
+            "SELECT schedulingshares FROM jobsets WHERE id = $1",
+            jobset_id,
+        )
+        .fetch_optional(&mut *self.conn)
+        .await?
+        .map(|v| v.schedulingshares))
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_jobset_build_steps(
+        &mut self,
+        jobset_id: i32,
+        scheduling_window: i64,
+    ) -> sqlx::Result<Vec<BuildSteps>> {
+        #[allow(clippy::cast_precision_loss)]
+        sqlx::query_as!(
+            BuildSteps,
+            r#"
+            SELECT s.startTime, s.stopTime FROM buildsteps s join builds b on build = id
+            WHERE
+                s.startTime IS NOT NULL AND
+                to_timestamp(s.stopTime) > (NOW() - (interval '1 second' * $1)) AND
+                jobset_id = $2
+            "#,
+            Some((scheduling_window * 10) as f64),
+            jobset_id,
+        )
+        .fetch_all(&mut *self.conn)
+        .await
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn abort_build(&mut self, build_id: i32) -> sqlx::Result<()> {
+        #[allow(clippy::cast_possible_truncation)]
+        sqlx::query!(
+            "UPDATE builds SET finished = 1, buildStatus = $2, startTime = $3, stopTime = $3 where id = $1 and finished = 0",
+            build_id,
+            BuildStatus::Aborted as i32,
+            // TODO migrate to 64bit timestamp
+            chrono::Utc::now().timestamp() as i32,
+        )
+        .execute(&mut *self.conn)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, paths), err)]
+    pub async fn check_if_paths_failed(&mut self, paths: &[String]) -> sqlx::Result<bool> {
+        Ok(
+            !sqlx::query!("SELECT path FROM failedpaths where path = ANY($1)", paths)
+                .fetch_all(&mut *self.conn)
+                .await?
+                .is_empty(),
+        )
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn clear_busy(&mut self, stop_time: i32) -> sqlx::Result<()> {
+        sqlx::query!(
+            "UPDATE buildsteps SET busy = 0, status = $1, stopTime = $2 WHERE busy != 0;",
+            BuildStatus::Aborted as i32,
+            Some(stop_time),
+        )
+        .execute(&mut *self.conn)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, step), err)]
+    pub async fn update_build_step(&mut self, step: UpdateBuildStep) -> sqlx::Result<()> {
+        sqlx::query!(
+            "UPDATE buildsteps SET busy = $1 WHERE build = $2 AND stepnr = $3 AND busy != 0 AND status IS NULL",
+            step.status as i32,
+            step.build_id,
+            step.step_nr,
+        )
+        .execute(&mut *self.conn)
+        .await?;
+        Ok(())
+    }
+
+    pub async fn insert_debug_build(
+        &mut self,
+        jobset_id: i32,
+        drv_path: &str,
+        system: &str,
+    ) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"INSERT INTO builds (
+                finished,
+                timestamp,
+                jobset_id,
+                job,
+                nixname,
+                drvpath,
+                system,
+                maxsilent,
+                timeout,
+                ischannel,
+                iscurrent,
+                priority,
+                globalpriority,
+                keep
+            ) VALUES (
+                0,
+                EXTRACT(EPOCH FROM NOW())::INT4,
+                $1,
+                'debug',
+                'debug',
+                $2,
+                $3,
+                7200,
+                36000,
+                0,
+                0,
+                100,
+                0,
+                0);"#,
+            jobset_id,
+            drv_path,
+            system,
+        )
+        .execute(&mut *self.conn)
+        .await?;
+        Ok(())
+    }
+
+    pub async fn get_build_output_for_path(
+        &mut self,
+        out_path: &str,
+    ) -> sqlx::Result<Option<super::models::BuildOutput>> {
+        sqlx::query_as!(
+            super::models::BuildOutput,
+            r#"
+            SELECT
+                id, buildStatus, releaseName, closureSize, size
+            FROM builds b
+            JOIN buildoutputs o on b.id = o.build
+            WHERE finished = 1 and (buildStatus = 0 or buildStatus = 6) and path = $1;"#,
+            out_path,
+        )
+        .fetch_optional(&mut *self.conn)
+        .await
+    }
+
+    pub async fn get_build_products_for_build_id(
+        &mut self,
+        build_id: i32,
+    ) -> sqlx::Result<Vec<super::models::OwnedBuildProduct>> {
+        sqlx::query_as!(
+            super::models::OwnedBuildProduct,
+            r#"
+            SELECT
+                type,
+                subtype,
+                fileSize,
+                sha256hash,
+                path,
+                name,
+                defaultPath
+            FROM buildproducts
+            WHERE build = $1 ORDER BY productnr;"#,
+            build_id
+        )
+        .fetch_all(&mut *self.conn)
+        .await
+    }
+
+    pub async fn get_build_metrics_for_build_id(
+        &mut self,
+        build_id: i32,
+    ) -> sqlx::Result<Vec<crate::models::OwnedBuildMetric>> {
+        sqlx::query_as!(
+            crate::models::OwnedBuildMetric,
+            r#"
+            SELECT
+                name, unit, value
+            FROM buildmetrics
+            WHERE build = $1;"#,
+            build_id
+        )
+        .fetch_all(&mut *self.conn)
+        .await
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_status(&mut self) -> sqlx::Result<Option<serde_json::Value>> {
+        Ok(
+            sqlx::query!("SELECT status FROM systemstatus WHERE what = 'queue-runner';",)
+                .fetch_optional(&mut *self.conn)
+                .await?
+                .map(|v| v.status),
+        )
+    }
+}
+
+impl Transaction<'_> {
+    #[tracing::instrument(skip(self), err)]
+    pub async fn commit(self) -> sqlx::Result<()> {
+        self.tx.commit().await
+    }
+
+    #[tracing::instrument(skip(self, v), err)]
+    pub async fn update_build(&mut self, build_id: i32, v: UpdateBuild<'_>) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"
+            UPDATE builds SET
+                finished = 1,
+                buildStatus = $2,
+                startTime = $3,
+                stopTime = $4,
+                size = $5,
+                closureSize = $6,
+                releaseName = $7,
+                isCachedBuild = $8,
+                notificationPendingSince = $4
+            WHERE
+                id = $1"#,
+            build_id,
+            v.status as i32,
+            v.start_time,
+            v.stop_time,
+            v.size,
+            v.closure_size,
+            v.release_name,
+            i32::from(v.is_cached_build),
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, status, start_time, stop_time, is_cached_build), err)]
+    pub async fn update_build_after_failure(
+        &mut self,
+        build_id: i32,
+        status: BuildStatus,
+        start_time: i32,
+        stop_time: i32,
+        is_cached_build: bool,
+    ) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"
+            UPDATE builds SET
+                finished = 1,
+                buildStatus = $2,
+                startTime = $3,
+                stopTime = $4,
+                isCachedBuild = $5,
+                notificationPendingSince = $4
+            WHERE
+                id = $1 AND finished = 0"#,
+            build_id,
+            status as i32,
+            start_time,
+            stop_time,
+            i32::from(is_cached_build),
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, status), err)]
+    pub async fn update_build_after_previous_failure(
+        &mut self,
+        build_id: i32,
+        status: BuildStatus,
+    ) -> sqlx::Result<()> {
+        #[allow(clippy::cast_possible_truncation)]
+        sqlx::query!(
+            r#"
+            UPDATE builds SET
+                finished = 1,
+                buildStatus = $2,
+                startTime = $3,
+                stopTime = $3,
+                isCachedBuild = 1,
+                notificationPendingSince = $3
+            WHERE
+                id = $1 AND finished = 0"#,
+            build_id,
+            status as i32,
+            // TODO migrate to 64bit timestamp
+            chrono::Utc::now().timestamp() as i32,
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, name, path), err)]
+    pub async fn update_build_output(
+        &mut self,
+        build_id: i32,
+        name: &str,
+        path: &str,
+    ) -> sqlx::Result<()> {
+        // TODO: support inserting multiple at the same time
+        sqlx::query!(
+            "UPDATE buildoutputs SET path = $3 WHERE build = $1 AND name = $2",
+            build_id,
+            name,
+            path,
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_last_build_step_id(&mut self, path: &str) -> sqlx::Result<Option<i32>> {
+        Ok(sqlx::query!("SELECT MAX(build) FROM buildsteps WHERE drvPath = $1 and startTime != 0 and stopTime != 0 and status = 1", path)
+            .fetch_optional(&mut *self.tx)
+            .await?
+            .and_then(|v| v.max))
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn get_last_build_step_id_for_output_path(
+        &mut self,
+        path: &str,
+    ) -> sqlx::Result<Option<i32>> {
+        Ok(sqlx::query!(
+            r#"
+            SELECT MAX(s.build) FROM buildsteps s
+            JOIN BuildStepOutputs o ON s.build = o.build
+            WHERE startTime != 0
+                AND stopTime != 0
+                AND status = 1
+                AND path = $1
+            "#,
+            path,
+        )
+        .fetch_optional(&mut *self.tx)
+        .await?
+        .and_then(|v| v.max))
+    }
+
+    #[tracing::instrument(skip(self, drv_path, name), err)]
+    pub async fn get_last_build_step_id_for_output_with_drv(
+        &mut self,
+        drv_path: &str,
+        name: &str,
+    ) -> sqlx::Result<Option<i32>> {
+        Ok(sqlx::query!(
+            r#"
+            SELECT MAX(s.build) FROM buildsteps s
+            JOIN BuildStepOutputs o ON s.build = o.build
+            WHERE startTime != 0
+                AND stopTime != 0
+                AND status = 1
+                AND drvPath = $1
+                AND name = $2
+            "#,
+            drv_path,
+            name,
+        )
+        .fetch_optional(&mut *self.tx)
+        .await?
+        .and_then(|v| v.max))
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn alloc_build_step(&mut self, build_id: i32) -> sqlx::Result<i32> {
+        Ok(sqlx::query!(
+            "SELECT MAX(stepnr) FROM buildsteps WHERE build = $1",
+            build_id
+        )
+        .fetch_optional(&mut *self.tx)
+        .await?
+        .and_then(|v| v.max)
+        .map_or(1, |v| v + 1))
+    }
+
+    #[tracing::instrument(skip(self, step), err)]
+    pub async fn insert_build_step(&mut self, step: InsertBuildStep<'_>) -> sqlx::Result<bool> {
+        let success = sqlx::query!(
+            r#"
+            INSERT INTO buildsteps (
+                build,
+                stepnr,
+                type,
+                drvPath,
+                busy,
+                startTime,
+                stopTime,
+                system,
+                status,
+                propagatedFrom,
+                errorMsg,
+                machine
+            ) VALUES (
+                $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12
+            )
+            ON CONFLICT DO NOTHING
+            "#,
+            step.build_id,
+            step.step_nr,
+            step.r#type as i32,
+            step.drv_path,
+            i32::from(step.busy),
+            step.start_time,
+            step.stop_time,
+            step.platform,
+            if step.status == BuildStatus::Busy {
+                None
+            } else {
+                Some(step.status as i32)
+            },
+            step.propagated_from,
+            step.error_msg,
+            step.machine,
+        )
+        .execute(&mut *self.tx)
+        .await?
+        .rows_affected()
+            != 0;
+        Ok(success)
+    }
+
+    #[tracing::instrument(skip(self, outputs), err)]
+    pub async fn insert_build_step_outputs(
+        &mut self,
+        outputs: &[InsertBuildStepOutput],
+    ) -> sqlx::Result<()> {
+        if outputs.is_empty() {
+            return Ok(());
+        }
+
+        let mut query_builder =
+            sqlx::QueryBuilder::new("INSERT INTO buildstepoutputs (build, stepnr, name, path) ");
+
+        query_builder.push_values(outputs, |mut b, output| {
+            b.push_bind(output.build_id)
+                .push_bind(output.step_nr)
+                .push_bind(&output.name)
+                .push_bind(&output.path);
+        });
+        let query = query_builder.build();
+        query.execute(&mut *self.tx).await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, name, path), err)]
+    pub async fn update_build_step_output(
+        &mut self,
+        build_id: i32,
+        step_nr: i32,
+        name: &str,
+        path: &str,
+    ) -> sqlx::Result<()> {
+        // TODO: support inserting multiple at the same time
+        sqlx::query!(
+            "UPDATE buildstepoutputs SET path = $4 WHERE build = $1 AND stepnr = $2 AND name = $3",
+            build_id,
+            step_nr,
+            name,
+            path,
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, res), err)]
+    pub async fn update_build_step_in_finish(
+        &mut self,
+        res: UpdateBuildStepInFinish<'_>,
+    ) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"
+            UPDATE buildsteps SET
+                busy = 0,
+                status = $1,
+                errorMsg = $4,
+                startTime = $5,
+                stopTime = $6,
+                machine = $7,
+                overhead = $8,
+                timesBuilt = $9,
+                isNonDeterministic = $10
+            WHERE
+                build = $2 AND stepnr = $3
+            "#,
+            res.status as i32,
+            res.build_id,
+            res.step_nr,
+            res.error_msg,
+            res.start_time,
+            res.stop_time,
+            res.machine,
+            res.overhead,
+            res.times_built,
+            res.is_non_deterministic,
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, build_id, step_nr), err)]
+    pub async fn get_drv_path_from_build_step(
+        &mut self,
+        build_id: i32,
+        step_nr: i32,
+    ) -> sqlx::Result<Option<String>> {
+        Ok(sqlx::query!(
+            "SELECT drvPath FROM BuildSteps WHERE build = $1 AND stepnr = $2",
+            build_id,
+            step_nr
+        )
+        .fetch_optional(&mut *self.tx)
+        .await?
+        .and_then(|v| v.drvpath))
+    }
+
+    #[tracing::instrument(skip(self, build_id), err)]
+    pub async fn check_if_build_is_not_finished(&mut self, build_id: i32) -> sqlx::Result<bool> {
+        Ok(sqlx::query!(
+            "SELECT id FROM builds WHERE id = $1 AND finished = 0",
+            build_id,
+        )
+        .fetch_optional(&mut *self.tx)
+        .await?
+        .is_some())
+    }
+
+    #[tracing::instrument(skip(self, p), err)]
+    pub async fn insert_build_product(&mut self, p: InsertBuildProduct<'_>) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"
+            INSERT INTO buildproducts (
+                build,
+                productnr,
+                type,
+                subtype,
+                fileSize,
+                sha256hash,
+                path,
+                name,
+                defaultPath
+            ) VALUES (
+                $1, $2, $3, $4, $5, $6, $7, $8, $9
+            )
+            "#,
+            p.build_id,
+            p.product_nr,
+            p.r#type,
+            p.subtype,
+            p.file_size,
+            p.sha256hash,
+            p.path,
+            p.name,
+            p.default_path,
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, build_id), err)]
+    pub async fn delete_build_products_by_build_id(&mut self, build_id: i32) -> sqlx::Result<()> {
+        sqlx::query!("DELETE FROM buildproducts WHERE build = $1", build_id)
+            .execute(&mut *self.tx)
+            .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, metric), err)]
+    pub async fn insert_build_metric(&mut self, metric: InsertBuildMetric<'_>) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"
+            INSERT INTO buildmetrics (
+                build,
+                name,
+                unit,
+                value,
+                project,
+                jobset,
+                job,
+                timestamp
+            ) VALUES (
+                $1, $2, $3, $4, $5, $6, $7, $8
+            )
+            "#,
+            metric.build_id,
+            metric.name,
+            metric.unit,
+            metric.value,
+            metric.project,
+            metric.jobset,
+            metric.job,
+            metric.timestamp,
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, build_id), err)]
+    pub async fn delete_build_metrics_by_build_id(&mut self, build_id: i32) -> sqlx::Result<()> {
+        sqlx::query!("DELETE FROM buildmetrics WHERE build = $1", build_id)
+            .execute(&mut *self.tx)
+            .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, path), err)]
+    pub async fn insert_failed_paths(&mut self, path: &str) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"
+            INSERT INTO failedpaths (
+                path
+            ) VALUES (
+                $1
+            )
+            "#,
+            path,
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    #[tracing::instrument(
+        skip(
+            self,
+            start_time,
+            build_id,
+            platform,
+            machine,
+            status,
+            error_msg,
+            propagated_from
+        ),
+        err
+    )]
+    pub async fn create_build_step(
+        &mut self,
+        start_time: Option<i64>,
+        build_id: crate::models::BuildID,
+        drv_path: &str,
+        platform: Option<&str>,
+        machine: String,
+        status: crate::models::BuildStatus,
+        error_msg: Option<String>,
+        propagated_from: Option<crate::models::BuildID>,
+        outputs: Vec<(String, Option<String>)>,
+    ) -> sqlx::Result<i32> {
+        let start_time = start_time.and_then(|start_time| i32::try_from(start_time).ok()); // TODO
+
+        let step_nr = loop {
+            let step_nr = self.alloc_build_step(build_id).await?;
+            if self
+                .insert_build_step(crate::models::InsertBuildStep {
+                    build_id,
+                    step_nr,
+                    r#type: crate::models::BuildType::Build,
+                    drv_path,
+                    status,
+                    busy: status == crate::models::BuildStatus::Busy,
+                    start_time,
+                    stop_time: if status == crate::models::BuildStatus::Busy {
+                        None
+                    } else {
+                        start_time
+                    },
+                    platform,
+                    propagated_from,
+                    error_msg: error_msg.as_deref(),
+                    machine: &machine,
+                })
+                .await?
+            {
+                break step_nr;
+            }
+        };
+
+        self.insert_build_step_outputs(
+            &outputs
+                .into_iter()
+                .map(|(name, path)| crate::models::InsertBuildStepOutput {
+                    build_id,
+                    step_nr,
+                    name,
+                    path,
+                })
+                .collect::<Vec<_>>(),
+        )
+        .await?;
+
+        if status == crate::models::BuildStatus::Busy {
+            self.notify_step_started(build_id, step_nr).await?;
+        }
+
+        Ok(step_nr)
+    }
+
+    #[tracing::instrument(
+        skip(self, start_time, stop_time, build_id, drv_path, output,),
+        err,
+        ret
+    )]
+    pub async fn create_substitution_step(
+        &mut self,
+        start_time: i32,
+        stop_time: i32,
+        build_id: crate::models::BuildID,
+        drv_path: &str,
+        output: (String, Option<String>),
+    ) -> anyhow::Result<i32> {
+        let step_nr = loop {
+            let step_nr = self.alloc_build_step(build_id).await?;
+            if self
+                .insert_build_step(crate::models::InsertBuildStep {
+                    build_id,
+                    step_nr,
+                    r#type: crate::models::BuildType::Substitution,
+                    drv_path,
+                    status: crate::models::BuildStatus::Success,
+                    busy: false,
+                    start_time: Some(start_time),
+                    stop_time: Some(stop_time),
+                    platform: None,
+                    propagated_from: None,
+                    error_msg: None,
+                    machine: "",
+                })
+                .await?
+            {
+                break step_nr;
+            }
+        };
+
+        self.insert_build_step_outputs(&[crate::models::InsertBuildStepOutput {
+            build_id,
+            step_nr,
+            name: output.0,
+            path: output.1,
+        }])
+        .await?;
+
+        Ok(step_nr)
+    }
+
+    #[tracing::instrument(skip(self, build, is_cached_build, start_time, stop_time,), err)]
+    pub async fn mark_succeeded_build(
+        &mut self,
+        build: crate::models::MarkBuildSuccessData<'_>,
+        is_cached_build: bool,
+        start_time: i32,
+        stop_time: i32,
+    ) -> anyhow::Result<()> {
+        if build.finished_in_db {
+            return Ok(());
+        }
+
+        if !self.check_if_build_is_not_finished(build.id).await? {
+            return Ok(());
+        }
+
+        self.update_build(
+            build.id,
+            crate::models::UpdateBuild {
+                status: if build.failed {
+                    crate::models::BuildStatus::FailedWithOutput
+                } else {
+                    crate::models::BuildStatus::Success
+                },
+                start_time,
+                stop_time,
+                size: i64::try_from(build.size)?,
+                closure_size: i64::try_from(build.closure_size)?,
+                release_name: build.release_name,
+                is_cached_build,
+            },
+        )
+        .await?;
+
+        for (name, path) in &build.outputs {
+            self.update_build_output(build.id, name, path).await?;
+        }
+
+        self.delete_build_products_by_build_id(build.id).await?;
+
+        for (nr, p) in build.products.iter().enumerate() {
+            self.insert_build_product(crate::models::InsertBuildProduct {
+                build_id: build.id,
+                product_nr: i32::try_from(nr + 1)?,
+                r#type: p.r#type,
+                subtype: p.subtype,
+                file_size: p.filesize,
+                sha256hash: p.sha256hash,
+                path: p.path.as_deref().unwrap_or_default(),
+                name: p.name,
+                default_path: p.defaultpath.unwrap_or_default(),
+            })
+            .await?;
+        }
+
+        self.delete_build_metrics_by_build_id(build.id).await?;
+        for m in &build.metrics {
+            self.insert_build_metric(crate::models::InsertBuildMetric {
+                build_id: build.id,
+                name: m.1.name,
+                unit: m.1.unit,
+                value: m.1.value,
+                project: build.project_name,
+                jobset: build.jobset_name,
+                job: build.name,
+                timestamp: i32::try_from(build.timestamp.timestamp())?, // TODO
+            })
+            .await?;
+        }
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, status), err)]
+    pub async fn upsert_status(&mut self, status: &serde_json::Value) -> sqlx::Result<()> {
+        sqlx::query!(
+            r#"INSERT INTO systemstatus (
+                what, status
+            ) VALUES (
+                'queue-runner', $1
+            ) ON CONFLICT (what) DO UPDATE SET status = EXCLUDED.status;"#,
+            Some(status),
+        )
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+}
+
+impl Transaction<'_> {
+    #[tracing::instrument(skip(self), err)]
+    async fn notify_any(&mut self, channel: &str, msg: &str) -> sqlx::Result<()> {
+        sqlx::query(
+            r"SELECT pg_notify(chan, payload) from (values ($1, $2)) notifies(chan, payload)",
+        )
+        .bind(channel)
+        .bind(msg)
+        .execute(&mut *self.tx)
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn notify_builds_added(&mut self) -> sqlx::Result<()> {
+        self.notify_any("builds_added", "?").await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, build_id, dependent_ids,), err)]
+    pub async fn notify_build_finished(
+        &mut self,
+        build_id: i32,
+        dependent_ids: &[i32],
+    ) -> sqlx::Result<()> {
+        let mut q = vec![build_id.to_string()];
+        q.extend(dependent_ids.iter().map(ToString::to_string));
+
+        self.notify_any("build_finished", &q.join("\t")).await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, build_id, step_nr,), err)]
+    pub async fn notify_step_started(&mut self, build_id: i32, step_nr: i32) -> sqlx::Result<()> {
+        self.notify_any("step_started", &format!("{build_id}\t{step_nr}"))
+            .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, build_id, step_nr, log_file,), err)]
+    pub async fn notify_step_finished(
+        &mut self,
+        build_id: i32,
+        step_nr: i32,
+        log_file: &str,
+    ) -> sqlx::Result<()> {
+        self.notify_any(
+            "step_finished",
+            &format!("{build_id}\t{step_nr}\t{log_file}"),
+        )
+        .await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn notify_dump_status(&mut self) -> sqlx::Result<()> {
+        self.notify_any("dump_status", "").await?;
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub async fn notify_status_dumped(&mut self) -> sqlx::Result<()> {
+        self.notify_any("status_dumped", "").await?;
+        Ok(())
+    }
+}
diff --git a/src/hydra-queue-runner-v2/crates/db/src/lib.rs b/src/hydra-queue-runner-v2/crates/db/src/lib.rs
new file mode 100644
index 000000000..de6d486e0
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/db/src/lib.rs
@@ -0,0 +1,47 @@
+mod connection;
+pub mod models;
+
+use std::str::FromStr as _;
+
+pub use connection::{Connection, Transaction};
+pub use sqlx::Error;
+
+#[derive(Clone)]
+pub struct Database {
+    pool: sqlx::PgPool,
+}
+
+impl Database {
+    pub async fn new(url: &str, max_connections: u32) -> Result<Self, sqlx::Error> {
+        Ok(Self {
+            pool: sqlx::postgres::PgPoolOptions::new()
+                .max_connections(max_connections)
+                .connect(url)
+                .await?,
+        })
+    }
+
+    pub async fn get(&self) -> Result<Connection, sqlx::Error> {
+        let conn = self.pool.acquire().await?;
+        Ok(Connection::new(conn))
+    }
+
+    pub fn reconfigure_pool(&self, url: &str) -> anyhow::Result<()> {
+        // TODO: ability to change max_connections by dropping the pool and recreating it
+        self.pool
+            .set_connect_options(sqlx::postgres::PgConnectOptions::from_str(url)?);
+        Ok(())
+    }
+
+    pub async fn listener(
+        &self,
+        channels: Vec<&str>,
+    ) -> Result<
+        impl futures::Stream<Item = Result<sqlx::postgres::PgNotification, sqlx::Error>> + Unpin,
+        sqlx::Error,
+    > {
+        let mut listener = sqlx::postgres::PgListener::connect_with(&self.pool).await?;
+        listener.listen_all(channels).await?;
+        Ok(listener.into_stream())
+    }
+}
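The `notify_*` helpers and `Database::listener` are the two halves of a Postgres pub/sub pair: transactions publish through `pg_notify`, and long-running tasks subscribe through a `PgListener`-backed stream. A rough consumer sketch (the connection string is a hypothetical DSN, and the payload format is inferred from the notifiers above):

```rust
use futures::StreamExt as _;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Hypothetical DSN; the real one comes from Hydra's configuration.
    let db = db::Database::new("postgres://hydra@localhost/hydra", 5).await?;

    // Subscribe to the channels the Transaction helpers above notify on.
    let mut stream = db
        .listener(vec!["builds_added", "build_finished", "step_started"])
        .await?;

    while let Some(notification) = stream.next().await {
        let n = notification?;
        // Payloads are tab-separated ids, e.g. "<build>\t<step>" for step_started.
        println!("{}: {}", n.channel(), n.payload());
    }
    Ok(())
}
```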
diff --git a/src/hydra-queue-runner-v2/crates/db/src/models.rs b/src/hydra-queue-runner-v2/crates/db/src/models.rs
new file mode 100644
index 000000000..85ade61b0
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/db/src/models.rs
@@ -0,0 +1,221 @@
+pub type BuildID = i32;
+
+#[repr(i32)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum BuildStatus {
+    Success = 0,
+    Failed = 1,
+    DepFailed = 2, // builds only
+    Aborted = 3,
+    Cancelled = 4,
+    FailedWithOutput = 6, // builds only
+    TimedOut = 7,
+    CachedFailure = 8, // steps only
+    Unsupported = 9,
+    LogLimitExceeded = 10,
+    NarSizeLimitExceeded = 11,
+    NotDeterministic = 12,
+    Busy = 100, // not stored
+}
+
+impl BuildStatus {
+    pub fn from_i32(v: i32) -> Option<Self> {
+        match v {
+            0 => Some(Self::Success),
+            1 => Some(Self::Failed),
+            2 => Some(Self::DepFailed),
+            3 => Some(Self::Aborted),
+            4 => Some(Self::Cancelled),
+            6 => Some(Self::FailedWithOutput),
+            7 => Some(Self::TimedOut),
+            8 => Some(Self::CachedFailure),
+            9 => Some(Self::Unsupported),
+            10 => Some(Self::LogLimitExceeded),
+            11 => Some(Self::NarSizeLimitExceeded),
+            12 => Some(Self::NotDeterministic),
+            100 => Some(Self::Busy),
+            _ => None,
+        }
+    }
+}
+
+#[repr(i32)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[allow(dead_code)]
+pub enum StepStatus {
+    Preparing = 1,
+    Connecting = 10,
+    SendingInputs = 20,
+    Building = 30,
+    WaitingForLocalSlot = 35,
+    ReceivingOutputs = 40,
+    PostProcessing = 50,
+}
+
+pub struct Jobset {
+    pub project: String,
+    pub name: String,
+    pub schedulingshares: i32,
+}
+
+pub struct BuildSmall {
+    pub id: BuildID,
+    pub globalpriority: i32,
+}
+
+pub struct Build {
+    pub id: BuildID,
+    pub jobset_id: i32,
+    pub project: String,
+    pub jobset: String,
+    pub job: String,
+    pub drvpath: String,
+    pub maxsilent: Option<i32>, // maxsilent integer default 3600
+    pub timeout: Option<i32>,   // timeout integer default 36000
+    // pub timestamp: chrono::NaiveDateTime,
+    pub timestamp: i64,
+    pub globalpriority: i32,
+    pub priority: i32,
+}
+
+pub struct BuildSteps {
+    pub starttime: Option<i32>,
+    pub stoptime: Option<i32>,
+}
+
+#[repr(i32)]
+pub enum BuildType {
+    Build = 0,
+    Substitution = 1,
+}
+
+pub struct UpdateBuild<'a> {
+    pub status: BuildStatus,
+    pub start_time: i32,
+    pub stop_time: i32,
+    pub size: i64,
+    pub closure_size: i64,
+    pub release_name: Option<&'a str>,
+    pub is_cached_build: bool,
+}
+
+pub struct InsertBuildStep<'a> {
+    pub build_id: BuildID,
+    pub step_nr: i32,
+    pub r#type: BuildType,
+    pub drv_path: &'a str,
+    pub status: BuildStatus,
+    pub busy: bool,
+    pub start_time: Option<i32>,
+    pub stop_time: Option<i32>,
+    pub platform: Option<&'a str>,
+    pub propagated_from: Option<BuildID>,
+    pub error_msg: Option<&'a str>,
+    pub machine: &'a str,
+}
+
+pub struct InsertBuildStepOutput {
+    pub build_id: BuildID,
+    pub step_nr: i32,
+    pub name: String,
+    pub path: Option<String>,
+}
+
+pub struct UpdateBuildStep {
+    pub build_id: BuildID,
+    pub step_nr: i32,
+    pub status: StepStatus,
+}
+
+pub struct UpdateBuildStepInFinish<'a> {
+    pub build_id: BuildID,
+    pub step_nr: i32,
+    pub status: BuildStatus,
+    pub error_msg: Option<&'a str>,
+    pub start_time: i32,
+    pub stop_time: i32,
+    pub machine: Option<&'a str>,
+    pub overhead: Option<i32>,
+    pub times_built: Option<i32>,
+    pub is_non_deterministic: Option<bool>,
+}
+
+pub struct InsertBuildProduct<'a> {
+    pub build_id: BuildID,
+    pub product_nr: i32,
+    pub r#type: &'a str,
+    pub subtype: &'a str,
+    pub file_size: Option<i64>,
+    pub sha256hash: Option<&'a str>,
+    pub path: &'a str,
+    pub name: &'a str,
+    pub default_path: &'a str,
+}
+
+pub struct InsertBuildMetric<'a> {
+    pub build_id: BuildID,
+    pub name: &'a str,
+    pub unit: Option<&'a str>,
+    pub value: f64,
+    pub project: &'a str,
+    pub jobset: &'a str,
+    pub job: &'a str,
+    pub timestamp: i32,
+}
+
+pub struct BuildOutput {
+    pub id: i32,
+    pub buildstatus: Option<i32>,
+    pub releasename: Option<String>,
+    pub closuresize: Option<i64>,
+    pub size: Option<i64>,
+}
+
+pub struct OwnedBuildProduct {
+    pub r#type: String,
+    pub subtype: String,
+    pub filesize: Option<i64>,
+    pub sha256hash: Option<String>,
+    pub path: Option<String>,
+    pub name: String,
+    pub defaultpath: Option<String>,
+}
+
+pub struct BuildProduct<'a> {
+    pub r#type: &'a str,
+    pub subtype: &'a str,
+    pub filesize: Option<i64>,
+    pub sha256hash: Option<&'a str>,
+    pub path: Option<String>,
+    pub name: &'a str,
+    pub defaultpath: Option<&'a str>,
+}
+
+pub struct OwnedBuildMetric {
+    pub name: String,
+    pub unit: Option<String>,
+    pub value: f64,
+}
+
+pub struct BuildMetric<'a> {
+    pub name: &'a str,
+    pub unit: Option<&'a str>,
+    pub value: f64,
+}
+
+pub struct MarkBuildSuccessData<'a> {
+    pub id: BuildID,
+    pub name: &'a str,
+    pub project_name: &'a str,
+    pub jobset_name: &'a str,
+    pub finished_in_db: bool,
+    pub timestamp: chrono::DateTime<chrono::Utc>,
+
+    pub failed: bool,
+    pub closure_size: u64,
+    pub size: u64,
+    pub release_name: Option<&'a str>,
+    pub outputs: ahash::AHashMap<String, String>,
+    pub products: Vec<BuildProduct<'a>>,
+    pub metrics: ahash::AHashMap<&'a str, BuildMetric<'a>>,
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/Cargo.toml b/src/hydra-queue-runner-v2/crates/nix-utils/Cargo.toml
new file mode 100644
index 000000000..2fcd60496
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "nix-utils"
+version = "0.1.0"
+edition = "2024"
+license = "LGPL-2.1-only"
+
+[dependencies]
+log = "0.4"
+tracing = "0.1"
+serde = { version = "1.0", features = ["derive"] }
+thiserror = "2.0"
+anyhow = "1.0.98"
+tokio = { version = "1.34", features = ["full"] }
+tokio-stream = { version = "0.1", features = ["io-util"] }
+tokio-util = { version = "0.7", features = ["io", "io-util"] }
+futures = "0.3"
+ahash = { version = "0.8.11", features = ["serde"] }
+regex = "1"
+sha2 = "0.10"
+url = "2.5.4"
+
+cxx = "1"
+bytes = "1.10.1"
+
+nix-diff = { git = "https://github.com/mic92/nix-diff-rs.git", rev = "6c0902f9c6f756b09095e9d77b424332ff0e32e9" }
+
+[build-dependencies]
+cxx-build = "1"
+pkg-config = "0.3"
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/LICENSE b/src/hydra-queue-runner-v2/crates/nix-utils/LICENSE
new file mode 100644
index 000000000..5ab7695ab
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/LICENSE
@@ -0,0 +1,504 @@
+                  GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.
Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. + + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. 
To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. + + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. + + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. 
+ + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. + + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. 
You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. + + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. 
Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. + + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. 
Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. + +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. 
In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Libraries + + If you develop a new library, and you want it to be of the greatest +possible use to the public, we recommend making it free software that +everyone can redistribute and change. You can do so by permitting +redistribution under these terms (or, alternatively, under the terms of the +ordinary General Public License). + + To apply these terms, attach the following notices to the library. It is +safest to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. 
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/build.rs b/src/hydra-queue-runner-v2/crates/nix-utils/build.rs
new file mode 100644
index 000000000..0e98dd1c6
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/build.rs
@@ -0,0 +1,22 @@
+fn main() {
+    if std::env::var("DOCS_RS").is_ok() {
+        return;
+    }
+
+    println!("cargo:rerun-if-changed=include/nix.h");
+    println!("cargo:rerun-if-changed=build.rs");
+    println!("cargo:rerun-if-changed=src/nix.cpp");
+    println!("cargo:rerun-if-changed=src/lib.rs");
+
+    let library = pkg_config::probe_library("nix-main").unwrap();
+    pkg_config::probe_library("nix-store").unwrap();
+    pkg_config::probe_library("nix-util").unwrap();
+    pkg_config::probe_library("libsodium").unwrap();
+
+    cxx_build::bridge("src/lib.rs")
+        .file("src/nix.cpp")
+        .flag("-std=c++2a")
+        .flag("-O2")
+        .includes(library.include_paths)
+        .compile("nix_utils");
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/copy_path.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/copy_path.rs
new file mode 100644
index 000000000..2f4512dea
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/copy_path.rs
@@ -0,0 +1,27 @@
+use nix_utils::{self, copy_paths};
+
+// requires env vars: AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY
+
+#[tokio::main]
+async fn main() {
+    let local = nix_utils::LocalStore::init();
+    let remote =
+        nix_utils::RemoteStore::init("s3://store?region=unknown&endpoint=http://localhost:9000");
+    nix_utils::set_verbosity(1);
+
+    let res = copy_paths(
+        local.as_base_store(),
+        remote.as_base_store(),
+        &[nix_utils::StorePath::new(
+            "1r5zv195y7b7b5q2daf5p82s2m6r4rg4-CVE-2024-56406.patch",
+        )],
+        false,
+        false,
+        false,
+    )
+    .await;
+    println!("copy res={res:?}");
+
+    let stats = remote.get_s3_stats().unwrap();
+    println!("stats {stats:?}");
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/drv_parse.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/drv_parse.rs
new file mode 100644
index 000000000..c744f0bc4
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/drv_parse.rs
@@ -0,0 +1,10 @@
+#[tokio::main]
+async fn main() {
+    let drv = nix_utils::query_drv(&nix_utils::StorePath::new(
+        "5g60vyp4cbgwl12pav5apyi571smp62s-hello-2.12.2.drv",
+    ))
+    .await
+    .unwrap();
+
+    println!("{drv:?}");
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/export_file.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/export_file.rs
new file mode 100644
index 000000000..d0dc9bfa8
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/export_file.rs
@@ -0,0 +1,34 @@
+use nix_utils::{self, BaseStore as _};
+
+#[tokio::main]
+async fn main() {
+    let store = nix_utils::LocalStore::init();
+
+    let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::<Vec<u8>>();
+    let closure = move |data: &[u8]| {
+        let data = Vec::from(data);
+        tx.send(data).is_ok()
+    };
+
+    let x = tokio::spawn(async move {
+        while let Some(x) = rx.recv().await {
+            print!("{}", String::from_utf8_lossy(&x));
+        }
+    });
+
+    // Run the blocking export on the blocking thread pool; each NAR chunk is
+    // forwarded through the channel to the async printer task above.
+    tokio::task::spawn_blocking(move || {
+        store
+            .export_paths(
+                &[nix_utils::StorePath::new(
+                    "5g60vyp4cbgwl12pav5apyi571smp62s-hello-2.12.2.drv",
+                )],
+                closure,
+            )
+            .unwrap();
+    })
+    .await
+    .unwrap();
+
+    x.await.unwrap();
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/get_settings.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/get_settings.rs
new file mode 100644
index 000000000..488167caf
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/get_settings.rs
@@ -0,0 +1,11 @@
+fn main() {
+    let _store = nix_utils::LocalStore::init();
+    println!("Nix prefix: {}", nix_utils::get_nix_prefix());
+    println!("Store dir: {}", nix_utils::get_store_dir());
+    println!("Log dir: {}", nix_utils::get_log_dir());
+    println!("State dir: {}", nix_utils::get_state_dir());
+    println!("System: {}", nix_utils::get_this_system());
+    println!("Extra Platforms: {:?}", nix_utils::get_extra_platforms());
+    println!("System features: {:?}", nix_utils::get_system_features());
+    println!("Use cgroups: {}", nix_utils::get_use_cgroups());
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/import_file_fd.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/import_file_fd.rs
new file mode 100644
index 000000000..2cc4bbaaa
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/import_file_fd.rs
@@ -0,0 +1,35 @@
+use tokio::io::{AsyncReadExt as _, AsyncWriteExt as _};
+
+use nix_utils::{self, BaseStore as _};
+
+#[tokio::main]
+async fn main() {
+    let store = nix_utils::LocalStore::init();
+
+    let file = tokio::fs::File::open("/tmp/test.nar").await.unwrap();
+    let mut reader = tokio::io::BufReader::new(file);
+
+    println!("Importing test.nar == 5g60vyp4cbgwl12pav5apyi571smp62s-hello-2.12.2.drv");
+    // Bridge the async reader to the import via a unix pipe: the write end
+    // stays async, the read end is handed to nix as a blocking fd.
+    let (mut tx, rx) = tokio::net::unix::pipe::pipe().unwrap();
+
+    tokio::spawn(async move {
+        let mut buf: [u8; 1] = [0; 1];
+        loop {
+            let s = reader.read(&mut buf).await.unwrap();
+            if s == 0 {
+                break;
+            }
+            tx.write(&buf).await.unwrap();
+        }
+        let _ = tx.shutdown().await;
+        drop(tx);
+    });
+    tokio::task::spawn_blocking(move || {
+        store
+            .import_paths_with_fd(rx.into_blocking_fd().unwrap(), false)
+            .unwrap();
+    })
+    .await
+    .unwrap();
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/import_file_stream.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/import_file_stream.rs
new file mode 100644
index 000000000..197262b9a
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/import_file_stream.rs
@@ -0,0 +1,11 @@
+use nix_utils::{self, BaseStore as _};
+
+#[tokio::main]
+async fn main() {
+    let store = nix_utils::LocalStore::init();
+
+    let file = tokio::fs::File::open("/tmp/test3.nar").await.unwrap();
+    let stream = tokio_util::io::ReaderStream::new(file);
+
+    store.import_paths(stream, false).await.unwrap();
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/is_valid_path.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/is_valid_path.rs
new file mode 100644
index 000000000..dc2bdddc5
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/is_valid_path.rs
@@ -0,0 +1,13 @@
+use nix_utils::{self, BaseStore as _};
+
+#[tokio::main]
+async fn main() {
+    let store = nix_utils::LocalStore::init();
+    let nix_prefix = nix_utils::get_nix_prefix();
+    println!(
+        "storepath={nix_prefix} valid={}",
+        store
+            .is_valid_path(nix_utils::StorePath::new(&nix_prefix))
+            .await
+    );
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/path_infos.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/path_infos.rs
new file mode 100644
index 000000000..68ef86a50
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/path_infos.rs
@@ -0,0 +1,18 @@
+use nix_utils::BaseStore as _;
+
+fn main() {
+    let local = nix_utils::LocalStore::init();
+
+    let p1 = nix_utils::StorePath::new("ihl4ya67glh9815v1lanyqph0p7hdzfb-hdf5-cpp-1.14.6-bin");
+    let p2 = nix_utils::StorePath::new("sgv5w811jvvxpjgmyw1n6l8hwfilha7x-hdf5-cpp-1.14.6-dev");
+    let p3 = nix_utils::StorePath::new("vb6yrzk31ng8s6nzs4y4jq6qsjab3gxv-hdf5-cpp-1.14.6");
+
+    let infos = local.query_path_infos(&[&p1, &p2, &p3]);
+
+    println!("{infos:?}");
+    println!("closure_size {p1}: {}", local.compute_closure_size(&p1));
+    println!("closure_size {p2}: {}", local.compute_closure_size(&p2));
+    println!("closure_size {p3}: {}", local.compute_closure_size(&p3));
+
+    println!("stats: {:?}", local.get_store_stats());
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/query_requisites.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/query_requisites.rs
new file mode 100644
index 000000000..faee81093
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/query_requisites.rs
@@ -0,0 +1,23 @@
+use nix_utils::BaseStore as _;
+
+#[tokio::main]
+async fn main() {
+    let store = nix_utils::LocalStore::init();
+
+    let drv = nix_utils::StorePath::new("5g60vyp4cbgwl12pav5apyi571smp62s-hello-2.12.2.drv");
+    let ps = store
+        .query_requisites(vec![drv.clone()], false)
+        .await
+        .unwrap();
+    for p in ps {
+        println!("{}", p.get_full_path());
+    }
+
+    println!();
+    println!();
+
+    let ps = store.query_requisites(vec![drv], true).await.unwrap();
+    for p in ps {
+        println!("{}", p.get_full_path());
+    }
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/stream_test.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/stream_test.rs
new file mode 100644
index 000000000..07d6c6e50
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/stream_test.rs
@@ -0,0 +1,29 @@
+use bytes::Bytes;
+use tokio::io::AsyncReadExt;
+use tokio_util::io::StreamReader;
+
+#[tokio::main]
+async fn main() -> Result<(), Box<dyn std::error::Error>> {
+    // Create a stream from an iterator.
+    let stream = tokio_stream::iter(vec![
+        tokio::io::Result::Ok(Bytes::from_static(&[0, 1, 2, 3])),
+        tokio::io::Result::Ok(Bytes::from_static(&[4, 5, 6, 7])),
+        tokio::io::Result::Ok(Bytes::from_static(&[8, 9, 10, 11])),
+    ]);
+
+    // Convert it to an AsyncRead.
+    let mut read = StreamReader::new(stream);
+
+    // Read from the stream in two-byte chunks until EOF.
+    let mut buf = [0; 2];
+
+    loop {
+        let read = read.read(&mut buf).await?;
+        if read == 0 {
+            break;
+        }
+        println!("{buf:?}");
+    }
+
+    Ok(())
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/examples/upsert_file.rs b/src/hydra-queue-runner-v2/crates/nix-utils/examples/upsert_file.rs
new file mode 100644
index 000000000..eb7290fe4
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/examples/upsert_file.rs
@@ -0,0 +1,22 @@
+// requires env vars: AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY
+
+#[tokio::main]
+async fn main() {
+    let store =
+        nix_utils::RemoteStore::init("s3://store?region=unknown&endpoint=http://localhost:9000");
+    nix_utils::set_verbosity(1);
+    let res = store
+        .upsert_file(
+            "log/z4zxibgvmk4ikarbbpwjql21wjmdvy85-dbus-1.drv".to_string(),
+            std::path::PathBuf::from(
+                concat!(env!("CARGO_MANIFEST_DIR"), "/examples/upsert_file.rs").to_string(),
+            ),
+            "text/plain; charset=utf-8",
+        )
+        .await;
+
+    println!("upsert res={res:?}");
+
+    let stats = store.get_s3_stats().unwrap();
+    println!("stats {stats:?}");
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/include/nix.h b/src/hydra-queue-runner-v2/crates/nix-utils/include/nix.h
new file mode 100644
index 000000000..dfb981f45
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/include/nix.h
@@ -0,0 +1,64 @@
+#pragma once
+
+#include "rust/cxx.h"
+#include <memory>
+#include <nix/store/store-api.hh>
+
+namespace nix_utils {
+class StoreWrapper {
+public:
+  StoreWrapper(nix::ref<nix::Store> _store);
+
+  nix::ref<nix::Store> _store;
+};
+} // namespace nix_utils
+
+// we need to include this after StoreWrapper
+#include "nix-utils/src/lib.rs.h"
+
+namespace nix_utils {
+std::shared_ptr<StoreWrapper> init(rust::Str uri);
+
+rust::String get_nix_prefix();
+rust::String get_store_dir();
+rust::String get_log_dir();
+rust::String get_state_dir();
+rust::String get_this_system();
+rust::Vec<rust::String> get_extra_platforms();
+rust::Vec<rust::String> get_system_features();
+bool get_use_cgroups();
+void set_verbosity(int32_t level);
+
+bool is_valid_path(const StoreWrapper &wrapper, rust::Str path);
+InternalPathInfo query_path_info(const StoreWrapper &wrapper, rust::Str path);
+void clear_path_info_cache(const StoreWrapper &wrapper);
+uint64_t compute_closure_size(const StoreWrapper &wrapper, rust::Str path);
+rust::Vec<rust::String> compute_fs_closure(const StoreWrapper &wrapper,
+                                           rust::Str path, bool flip_direction,
+                                           bool include_outputs,
+                                           bool include_derivers);
+rust::Vec<rust::String>
+compute_fs_closures(const StoreWrapper &wrapper,
+                    rust::Slice<const rust::Str> paths, bool flip_direction,
+                    bool include_outputs, bool include_derivers, bool toposort);
+void upsert_file(const StoreWrapper &wrapper, rust::Str path, rust::Str data,
+                 rust::Str mime_type);
+StoreStats get_store_stats(const StoreWrapper &wrapper);
+S3Stats get_s3_stats(const StoreWrapper &wrapper);
+void copy_paths(const StoreWrapper &src_store, const StoreWrapper &dst_store,
+                rust::Slice<const rust::Str> paths, bool repair,
+                bool check_sigs, bool substitute);
+
+void import_paths(
+    const StoreWrapper &wrapper, bool check_sigs, size_t runtime, size_t reader,
+    rust::Fn<size_t(rust::Slice<uint8_t>, size_t, size_t, size_t)> callback,
+    size_t user_data);
+void import_paths_with_fd(const StoreWrapper &wrapper, bool check_sigs,
+                          int32_t fd);
+void export_paths(const StoreWrapper &src_store,
+                  rust::Slice<const rust::Str> paths,
+                  rust::Fn<bool(rust::Slice<const uint8_t>, size_t)> callback,
+                  size_t userdata);
+
+rust::String try_resolve_drv(const StoreWrapper &wrapper, rust::Str path);
+} // namespace nix_utils
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/src/drv.rs b/src/hydra-queue-runner-v2/crates/nix-utils/src/drv.rs
new file mode 100644
index 000000000..9c8859ff5
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/src/drv.rs
@@ -0,0 +1,269 @@
+use ahash::AHashMap;
+use tokio::io::{AsyncBufReadExt as _, BufReader};
+use tokio_stream::wrappers::LinesStream;
+
+use crate::StorePath;
+
+#[derive(Debug, Clone)]
+pub struct Output {
+    pub name: String,
+    pub path: Option<StorePath>,
+    pub hash: Option<String>,
+    pub hash_algo: Option<String>,
+}
+
+#[derive(Debug, Clone)]
+pub struct DerivationEnv {
+    inner: AHashMap<String, String>,
+}
+
+impl DerivationEnv {
+    fn new(v: AHashMap<String, String>) -> Self {
+        Self { inner: v }
+    }
+
+    pub fn get(&self, k: &str) -> Option<&str> {
+        self.inner.get(k).map(|v| v.as_str())
+    }
+
+    pub fn get_required_system_features(&self) -> Vec<&str> {
+        self.inner
+            .get("requiredSystemFeatures")
+            .map(|v| v.as_str())
+            .unwrap_or_default()
+            .split(' ')
+            .filter(|v| !v.is_empty())
+            .collect()
+    }
+
+    pub fn get_output_hash(&self) -> Option<&str> {
+        self.inner.get("outputHash").map(|v| v.as_str())
+    }
+
+    pub fn get_output_hash_mode(&self) -> Option<&str> {
+        self.inner.get("outputHashMode").map(|v| v.as_str())
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct Derivation {
+    pub env: DerivationEnv,
+    pub input_drvs: Vec<String>,
+    pub outputs: Vec<Output>,
+    pub name: String,
+    pub system: String,
+}
+
+impl Derivation {
+    fn new(path: String, v: nix_diff::types::Derivation) -> Result<Self, crate::Error> {
+        Ok(Self {
+            env: DerivationEnv::new(
+                v.env
+                    .into_iter()
+                    .filter_map(|(k, v)| {
+                        Some((String::from_utf8(k).ok()?, String::from_utf8(v).ok()?))
+                    })
+                    .collect(),
+            ),
+            input_drvs: v
+                .input_derivations
+                .into_keys()
+                .filter_map(|v| String::from_utf8(v).ok())
+                .collect(),
+            outputs: v
+                .outputs
+                .into_iter()
+                .filter_map(|(k, v)| {
+                    Some(Output {
+                        name: String::from_utf8(k).ok()?,
+                        path: if v.path.is_empty() {
+                            None
+                        } else {
+                            String::from_utf8(v.path).ok().map(|p| StorePath::new(&p))
+                        },
+                        hash: v.hash.map(String::from_utf8).transpose().ok()?,
+                        hash_algo: v.hash_algorithm.map(String::from_utf8).transpose().ok()?,
+                    })
+                })
+                .collect(),
+            name: path,
+            system: String::from_utf8(v.platform).unwrap_or_default(),
+        })
+    }
+}
+
+#[tracing::instrument(fields(%drv), err)]
+pub async fn query_drv(drv: &StorePath) -> Result<Option<Derivation>, crate::Error> {
+    if !drv.is_drv() {
+        return Ok(None);
+    }
+
+    let full_path = drv.get_full_path();
+    if !tokio::fs::try_exists(&full_path).await? {
+        return Ok(None);
+    }
+
+    let input = tokio::fs::read_to_string(&full_path).await?;
+    Ok(Some(Derivation::new(
+        full_path,
+        nix_diff::parser::parse_derivation_string(&input)?,
+    )?))
+}
+
+#[derive(Debug, Clone)]
+pub struct BuildOptions {
+    max_log_size: u64,
+    max_silent_time: i32,
+    build_timeout: i32,
+    substitute: bool,
+    build: bool,
+}
+
+fn format_bool(v: bool) -> &'static str {
+    if v { "true" } else { "false" }
+}
+
+impl BuildOptions {
+    pub fn new(max_log_size: Option<u64>) -> Self {
+        Self {
+            max_log_size: max_log_size.unwrap_or(64u64 << 20),
+            max_silent_time: 0,
+            build_timeout: 0,
+            substitute: false,
+            build: true,
+        }
+    }
+
+    pub fn complete(max_log_size: u64, max_silent_time: i32, build_timeout: i32) -> Self {
+        Self {
+            max_log_size,
+            max_silent_time,
+            build_timeout,
+            substitute: false,
+            build: true,
+        }
+    }
+
+    pub fn substitute_only() -> Self {
+        let mut o = Self::new(None);
+        o.build = false;
+        o.substitute = true;
+        o.max_silent_time = 60 * 5;
+        o.build_timeout = 60 * 5;
+        o
+    }
+
+    pub fn set_max_silent_time(&mut self, max_silent_time: i32) {
+        self.max_silent_time = max_silent_time;
+    }
+
+    pub fn set_build_timeout(&mut self, build_timeout: i32) {
+        self.build_timeout = build_timeout;
+    }
+
+    pub fn get_max_log_size(&self) -> u64 {
+        self.max_log_size
+    }
+
+    pub fn get_max_silent_time(&self) -> i32 {
+        self.max_silent_time
+    }
+
+    pub fn get_build_timeout(&self) -> i32 {
+        self.build_timeout
+    }
+}
+
+#[allow(clippy::type_complexity)]
+#[tracing::instrument(skip(opts, drvs), err)]
+pub async fn realise_drvs(
+    drvs: &[&StorePath],
+    opts: &BuildOptions,
+    kill_on_drop: bool,
+) -> Result<
+    (
+        tokio::process::Child,
+        tokio_stream::adapters::Merge<
+            LinesStream<BufReader<tokio::process::ChildStdout>>,
+            LinesStream<BufReader<tokio::process::ChildStderr>>,
+        >,
+    ),
+    crate::Error,
+> {
+    use tokio_stream::StreamExt;
+
+    let mut child = tokio::process::Command::new("nix-store")
+        .args([
+            "-r",
+            "--quiet", // we want to always set this
+            "--max-silent-time",
+            &opts.max_silent_time.to_string(),
+            "--timeout",
+            &opts.build_timeout.to_string(),
+            "--option",
+            "max-build-log-size",
+            &opts.max_log_size.to_string(),
+            "--option",
+            "fallback",
+            format_bool(opts.build),
+            "--option",
+            "substitute",
+            format_bool(opts.substitute),
+            "--option",
+            "builders",
+            "",
+        ])
+        .args(drvs.iter().map(|v| v.get_full_path()))
+        .kill_on_drop(kill_on_drop)
+        .stdout(std::process::Stdio::piped())
+        .stderr(std::process::Stdio::piped())
+        .spawn()?;
+
+    let stdout = child.stdout.take().ok_or(crate::Error::Stream)?;
+    let stderr = child.stderr.take().ok_or(crate::Error::Stream)?;
+
+    let stdout = LinesStream::new(BufReader::new(stdout).lines());
+    let stderr = LinesStream::new(BufReader::new(stderr).lines());
+
+    Ok((child, StreamExt::merge(stdout, stderr)))
+}
+
+#[allow(clippy::type_complexity)]
+#[tracing::instrument(skip(opts), fields(%drv), err)]
+pub async fn realise_drv(
+    drv: &StorePath,
+    opts: &BuildOptions,
+    kill_on_drop: bool,
+) -> Result<
+    (
+        tokio::process::Child,
+        tokio_stream::adapters::Merge<
+            LinesStream<BufReader<tokio::process::ChildStdout>>,
+            LinesStream<BufReader<tokio::process::ChildStderr>>,
+        >,
+    ),
+    crate::Error,
+> {
+    realise_drvs(&[drv], opts, kill_on_drop).await
+}
+
+#[tracing::instrument(skip(outputs))]
+pub async fn query_missing_outputs(outputs: Vec<Output>) -> Vec<Output> {
+    use futures::stream::StreamExt as _;
+
+    tokio_stream::iter(outputs)
+        .map(|o| async move {
+            let Some(path) = &o.path else {
+                return None;
+            };
+            if !super::check_if_storepath_exists(path).await {
+                Some(o)
+            } else {
+                None
+            }
+        })
+        .buffered(50)
+        .filter_map(|o| async { o })
+        .collect()
+        .await
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/src/lib.rs b/src/hydra-queue-runner-v2/crates/nix-utils/src/lib.rs
new file mode 100644
index 000000000..31a28b2bb
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/src/lib.rs
@@ -0,0 +1,983 @@
+mod drv;
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error("std io error: `{0}`")]
+    Io(#[from] std::io::Error),
+
+    #[error("tokio join error: `{0}`")]
+    TokioJoin(#[from] tokio::task::JoinError),
+
+    #[error("utf8 error: `{0}`")]
+    Utf8(#[from] std::str::Utf8Error),
+
+    #[error("Failed to get tokio stdout stream")]
+    Stream,
+
+    #[error("regex error: `{0}`")]
+    Regex(#[from] regex::Error),
+
+    #[error("Command failed with `{0}`")]
+    Exit(std::process::ExitStatus),
+
+    #[error("Exception was thrown `{0}`")]
+    Exception(#[from] cxx::Exception),
+
+    #[error("anyhow error: `{0}`")]
+    Anyhow(#[from] anyhow::Error),
+}
+
+use ahash::AHashMap;
+pub use drv::{
+    BuildOptions, Derivation, Output as DerivationOutput, query_drv, query_missing_outputs,
+    realise_drv, realise_drvs,
+};
+
+pub const HASH_LEN: usize = 32;
+
+#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
+pub struct StorePath {
+    base_name: String,
+}
+
+impl StorePath {
+    pub fn new(p: &str) -> Self {
+        if let Some(postfix) = p.strip_prefix("/nix/store/") {
+            debug_assert!(postfix.len() > HASH_LEN + 1);
+            Self {
+                base_name: postfix.to_string(),
+            }
+        } else {
+            debug_assert!(p.len() > HASH_LEN + 1);
+            Self {
+                base_name: p.to_string(),
+            }
+        }
+    }
+
+    pub fn into_base_name(self) -> String {
+        self.base_name
+    }
+
+    pub fn base_name(&self) -> &str {
+        &self.base_name
+    }
+
+    pub fn name(&self) -> &str {
+        &self.base_name[HASH_LEN + 1..]
+    }
+
+    pub fn hash_part(&self) -> &str {
+        &self.base_name[..HASH_LEN]
+    }
+
+    pub fn is_drv(&self) -> bool {
+        self.base_name.ends_with(".drv")
+    }
+
+    pub fn get_full_path(&self) -> String {
+        format!("/nix/store/{}", self.base_name)
+    }
+}
+impl serde::Serialize for StorePath {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        serializer.serialize_str(self.base_name())
+    }
+}
+
+impl std::fmt::Display for StorePath {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
+        write!(f, "{}", self.base_name)
+    }
+}
+
+#[tracing::instrument(skip(path))]
+pub async fn check_if_storepath_exists(path: &StorePath) -> bool {
+    tokio::fs::try_exists(&path.get_full_path())
+        .await
+        .unwrap_or_default()
+}
+
+pub fn validate_statuscode(status: std::process::ExitStatus) -> Result<(), Error> {
+    if status.success() {
+        Ok(())
+    } else {
+        Err(Error::Exit(status))
+    }
+}
+
+pub fn add_root(root_dir: &std::path::Path, store_path: &StorePath) {
+    let path = root_dir.join(store_path.base_name());
+    // force create symlink
+    if path.exists() {
+        let _ = std::fs::remove_file(&path);
+    }
+    if !path.exists() {
+        let _ = std::os::unix::fs::symlink(store_path.get_full_path(), path);
+    }
+}
+
+#[cxx::bridge(namespace = "nix_utils")]
+mod ffi {
+    #[derive(Debug)]
+    struct InternalPathInfo {
+        deriver: String,
+        nar_hash: String,
+        registration_time: i64,
+        nar_size: u64,
+        refs: Vec<String>,
+        sigs: Vec<String>,
+        ca: String,
+    }
+
+    #[derive(Debug)]
+    struct StoreStats {
+        nar_info_read: u64,
+        nar_info_read_averted: u64,
+        nar_info_missing: u64,
+        nar_info_write: u64,
+        path_info_cache_size: u64,
+        nar_read: u64,
+        nar_read_bytes: u64,
+        nar_read_compressed_bytes: u64,
+        nar_write: u64,
+        nar_write_averted: u64,
+        nar_write_bytes: u64,
+        nar_write_compressed_bytes: u64,
+        nar_write_compression_time_ms: u64,
+    }
+
+    #[derive(Debug)]
+    struct S3Stats {
+        put: u64,
+        put_bytes: u64,
+        put_time_ms: u64,
+        get: u64,
+        get_bytes: u64,
+        get_time_ms: u64,
+        head: u64,
+    }
+
+    unsafe extern "C++" {
+        include!("nix-utils/include/nix.h");
+
+        type StoreWrapper;
+
+        fn init(uri: &str) -> SharedPtr<StoreWrapper>;
+
+        fn get_nix_prefix() -> String;
+        fn get_store_dir() -> String;
+        fn get_log_dir() -> String;
+        fn get_state_dir() -> String;
+        fn get_this_system() -> String;
+        fn get_extra_platforms() -> Vec<String>;
+        fn get_system_features() -> Vec<String>;
+        fn get_use_cgroups() -> bool;
+        fn set_verbosity(level: i32);
+
+        fn is_valid_path(store: &StoreWrapper, path: &str) -> Result<bool>;
+        fn query_path_info(store: &StoreWrapper, path: &str) -> Result<InternalPathInfo>;
+        fn compute_closure_size(store: &StoreWrapper, path: &str) -> Result<u64>;
+        fn clear_path_info_cache(store: &StoreWrapper) -> Result<()>;
+        fn compute_fs_closure(
+            store: &StoreWrapper,
+            path: &str,
+            flip_direction: bool,
+            include_outputs: bool,
+            include_derivers: bool,
+        ) -> Result<Vec<String>>;
+        fn compute_fs_closures(
+            store: &StoreWrapper,
+            paths: &[&str],
+            flip_direction: bool,
+            include_outputs: bool,
+            include_derivers: bool,
+            toposort: bool,
+        ) -> Result<Vec<String>>;
+        fn upsert_file(store: &StoreWrapper, path: &str, data: &str, mime_type: &str)
+            -> Result<()>;
+        fn get_store_stats(store: &StoreWrapper) -> Result<StoreStats>;
+        fn get_s3_stats(store: &StoreWrapper) -> Result<S3Stats>;
+        fn copy_paths(
+            src_store: &StoreWrapper,
+            dst_store: &StoreWrapper,
+            paths: &[&str],
+            repair: bool,
+            check_sigs: bool,
+            substitute: bool,
+        ) -> Result<()>;
+
+        fn import_paths(
+            store: &StoreWrapper,
+            check_sigs: bool,
+            runtime: usize,
+            reader: usize,
+            callback: unsafe extern "C" fn(
+                data: &mut [u8],
+                runtime: usize,
+                reader: usize,
+                user_data: usize,
+            ) -> usize,
+            user_data: usize,
+        ) -> Result<()>;
+        fn import_paths_with_fd(store: &StoreWrapper, check_sigs: bool, fd: i32) -> Result<()>;
+        fn export_paths(
+            store: &StoreWrapper,
+            paths: &[&str],
+            callback: unsafe extern "C" fn(data: &[u8], user_data: usize) -> bool,
+            user_data: usize,
+        ) -> Result<()>;
+
+        fn try_resolve_drv(store: &StoreWrapper, path: &str) -> Result<String>;
+    }
+}
+
+pub use ffi::{S3Stats, StoreStats};
+
+#[inline]
+#[must_use]
+pub fn get_nix_prefix() -> String {
+    ffi::get_nix_prefix()
+}
+
+#[inline]
+#[must_use]
+pub fn get_store_dir() -> String {
+    ffi::get_store_dir()
+}
+
+#[inline]
+#[must_use]
+pub fn get_log_dir() -> String {
+    ffi::get_log_dir()
+}
+
+#[inline]
+#[must_use]
+pub fn get_state_dir() -> String {
+    ffi::get_state_dir()
+}
+
+#[inline]
+#[must_use]
+pub fn get_this_system() -> String {
+    ffi::get_this_system()
+}
+
+#[inline]
+#[must_use]
+pub fn get_extra_platforms() -> Vec<String> {
+    ffi::get_extra_platforms()
+}
+
+#[inline]
+#[must_use]
+pub fn get_system_features() -> Vec<String> {
+    ffi::get_system_features()
+}
+
+#[inline]
+#[must_use]
+pub fn get_use_cgroups() -> bool {
+    ffi::get_use_cgroups()
+}
+
+#[inline]
+/// Set the loglevel.
+pub fn set_verbosity(level: i32) {
+    ffi::set_verbosity(level);
+}
+
+pub(crate) async fn asyncify<F, T>(f: F) -> Result<T, Error>
+where
+    F: FnOnce() -> Result<T, cxx::Exception> + Send + 'static,
+    T: Send + 'static,
+{
+    match tokio::task::spawn_blocking(f).await {
+        Ok(res) => Ok(res?),
+        Err(_) => Err(std::io::Error::new(
+            std::io::ErrorKind::Other,
+            "background task failed",
+        ))?,
+    }
+}
+
+#[inline]
+pub async fn copy_paths(
+    src: &BaseStoreImpl,
+    dst: &BaseStoreImpl,
+    paths: &[StorePath],
+    repair: bool,
+    check_sigs: bool,
+    substitute: bool,
+) -> Result<(), Error> {
+    let paths = paths.iter().map(|v| v.get_full_path()).collect::<Vec<_>>();
+
+    let src = src.wrapper.clone();
+    let dst = dst.wrapper.clone();
+
+    asyncify(move || {
+        let slice = paths.iter().map(|v| v.as_str()).collect::<Vec<_>>();
+        ffi::copy_paths(&src, &dst, &slice, repair, check_sigs, substitute)
+    })
+    .await
+}
+
+#[derive(Debug)]
+pub struct PathInfo {
+    pub deriver: Option<StorePath>,
+    pub nar_hash: String,
+    pub registration_time: i64,
+    pub nar_size: u64,
+    pub refs: Vec<StorePath>,
+    pub sigs: Vec<String>,
+    pub ca: Option<String>,
+}
+
+impl From<crate::ffi::InternalPathInfo> for PathInfo {
+    fn from(val: crate::ffi::InternalPathInfo) -> Self {
+        Self {
+            deriver: if val.deriver.is_empty() {
+                None
+            } else {
+                Some(StorePath::new(&val.deriver))
+            },
+            nar_hash: val.nar_hash,
+            registration_time: val.registration_time,
+            nar_size: val.nar_size,
+            refs: val.refs.iter().map(|v| StorePath::new(v)).collect(),
+            sigs: val.sigs,
+            ca: if val.ca.is_empty() {
+                None
+            } else {
+                Some(val.ca)
+            },
+        }
+    }
+}
+
+pub trait BaseStore {
+    #[must_use]
+    /// Check whether a path is valid.
+    fn is_valid_path(&self, path: StorePath) -> impl std::future::Future<Output = bool>;
+
+    fn query_path_info(&self, path: &StorePath) -> Option<PathInfo>;
+    fn query_path_infos(&self, paths: &[&StorePath]) -> AHashMap<StorePath, PathInfo>;
+    fn compute_closure_size(&self, path: &StorePath) -> u64;
+
+    fn clear_path_info_cache(&self);
+
+    fn compute_fs_closure(
+        &self,
+        path: &str,
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+    ) -> Result<Vec<String>, cxx::Exception>;
+
+    fn compute_fs_closures(
+        &self,
+        paths: &[&str],
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+        toposort: bool,
+    ) -> Result<Vec<StorePath>, cxx::Exception>;
+
+    fn query_requisites(
+        &self,
+        drvs: Vec<StorePath>,
+        include_outputs: bool,
+    ) -> impl std::future::Future<Output = Result<Vec<StorePath>, crate::Error>>;
+
+    fn get_store_stats(&self) -> Result<StoreStats, cxx::Exception>;
+
+    /// Import paths from nar
+    fn import_paths<S>(
+        &self,
+        stream: S,
+        check_sigs: bool,
+    ) -> impl std::future::Future<Output = Result<(), Error>>
+    where
+        S: tokio_stream::Stream<Item = std::io::Result<bytes::Bytes>>
+            + Send
+            + Unpin
+            + 'static;
+
+    /// Import paths from nar
+    fn import_paths_with_fd<Fd>(&self, fd: Fd, check_sigs: bool) -> Result<(), cxx::Exception>
+    where
+        Fd: std::os::fd::AsFd + std::os::fd::AsRawFd;
+
+    /// Export a store path in NAR format. The data is passed in chunks to callback
+    fn export_paths<F>(&self, paths: &[StorePath], callback: F) -> Result<(), cxx::Exception>
+    where
+        F: FnMut(&[u8]) -> bool;
+
+    fn try_resolve_drv(&self, path: &StorePath) -> Option<StorePath>;
+}
+
+unsafe impl Send for crate::ffi::StoreWrapper {}
+unsafe impl Sync for crate::ffi::StoreWrapper {}
+
+#[derive(Clone)]
+pub struct BaseStoreImpl {
+    wrapper: cxx::SharedPtr<ffi::StoreWrapper>,
+}
+
+impl BaseStoreImpl {
+    fn new(store: cxx::SharedPtr<ffi::StoreWrapper>) -> Self {
+        Self { wrapper: store }
+    }
+}
+
+fn import_paths_trampoline<F, S, E>(
+    data: &mut [u8],
+    runtime: usize,
+    reader: usize,
+    userdata: usize,
+) -> usize
+where
+    F: FnMut(
+        &tokio::runtime::Runtime,
+        &mut Box<tokio_util::io::StreamReader<S, bytes::Bytes>>,
+        &mut [u8],
+    ) -> usize,
+    S: futures::stream::Stream<Item = Result<bytes::Bytes, E>>,
+    E: Into<std::io::Error>,
+{
+    // The runtime, reader and closure are smuggled across the FFI boundary as
+    // raw addresses; cast them back to their concrete types before use.
+    let runtime =
+        unsafe { &*(runtime as *mut std::ffi::c_void).cast::<Box<tokio::runtime::Runtime>>() };
+    let reader = unsafe {
+        &mut *(reader as *mut std::ffi::c_void)
+            .cast::<Box<tokio_util::io::StreamReader<S, bytes::Bytes>>>()
+    };
+    let closure = unsafe { &mut *(userdata as *mut std::ffi::c_void).cast::<F>() };
+    closure(runtime, reader, data)
+}
+
+fn export_paths_trampoline<F>(data: &[u8], userdata: usize) -> bool
+where
+    F: FnMut(&[u8]) -> bool,
+{
+    let closure = unsafe { &mut *(userdata as *mut std::ffi::c_void).cast::<F>() };
+    closure(data)
+}
+
+impl BaseStore for BaseStoreImpl {
+    #[inline]
+    async fn is_valid_path(&self, path: StorePath) -> bool {
+        let store = self.wrapper.clone();
+        asyncify(move || ffi::is_valid_path(&store, &path.get_full_path()))
+            .await
+            .unwrap_or(false)
+    }
+
+    #[inline]
+    fn query_path_info(&self, path: &StorePath) -> Option<PathInfo> {
+        ffi::query_path_info(&self.wrapper, &path.get_full_path())
+            .ok()
+            .map(Into::into)
+    }
+
+    #[inline]
+    fn query_path_infos(&self, paths: &[&StorePath]) -> AHashMap<StorePath, PathInfo> {
+        let mut res = AHashMap::new();
+        for p in paths {
+            if let Some(info) = self.query_path_info(p) {
+                res.insert((*p).to_owned(), info);
+            }
+        }
+        res
+    }
+
+    #[inline]
+    fn compute_closure_size(&self, path: &StorePath) -> u64 {
+        ffi::compute_closure_size(&self.wrapper, &path.get_full_path()).unwrap_or_default()
+    }
+
+    #[inline]
+    fn clear_path_info_cache(&self) {
+        let _ = ffi::clear_path_info_cache(&self.wrapper);
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    fn compute_fs_closure(
+        &self,
+        path: &str,
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+    ) -> Result<Vec<String>, cxx::Exception> {
+        ffi::compute_fs_closure(
+            &self.wrapper,
+            path,
+            flip_direction,
+            include_outputs,
+            include_derivers,
+        )
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    fn compute_fs_closures(
+        &self,
+        paths: &[&str],
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+        toposort: bool,
+    ) -> Result<Vec<StorePath>, cxx::Exception> {
+        Ok(ffi::compute_fs_closures(
+            &self.wrapper,
+            paths,
+            flip_direction,
+            include_outputs,
+            include_derivers,
+            toposort,
+        )?
+        .into_iter()
+        .map(|v| StorePath::new(&v))
+        .collect())
+    }
+
+    async fn query_requisites(
+        &self,
+        drvs: Vec<StorePath>,
+        include_outputs: bool,
+    ) -> Result<Vec<StorePath>, Error> {
+        let paths = drvs.iter().map(|v| v.get_full_path()).collect::<Vec<_>>();
+        let slice = paths.iter().map(|v| v.as_str()).collect::<Vec<_>>();
+
+        let mut out = self.compute_fs_closures(&slice, false, include_outputs, false, true)?;
+        out.reverse();
+        Ok(out)
+    }
+
+    fn get_store_stats(&self) -> Result<StoreStats, cxx::Exception> {
+        ffi::get_store_stats(&self.wrapper)
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, stream), err)]
+    async fn import_paths<S>(&self, stream: S, check_sigs: bool) -> Result<(), Error>
+    where
+        S: tokio_stream::Stream<Item = std::io::Result<bytes::Bytes>>
+            + Send
+            + Unpin
+            + 'static,
+    {
+        use tokio::io::AsyncReadExt as _;
+
+        let callback = |runtime: &tokio::runtime::Runtime,
+                        reader: &mut Box<tokio_util::io::StreamReader<S, bytes::Bytes>>,
+                        data: &mut [u8]| {
+            runtime.block_on(async { reader.read(data).await.unwrap_or(0) })
+        };
+
+        let reader = Box::new(tokio_util::io::StreamReader::new(stream));
+        let store = self.clone();
+        tokio::task::spawn_blocking(move || {
+            store.import_paths_with_cb(callback, reader, check_sigs)
+        })
+        .await??;
+        Ok(())
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, fd), err)]
+    fn import_paths_with_fd<Fd>(&self, fd: Fd, check_sigs: bool) -> Result<(), cxx::Exception>
+    where
+        Fd: std::os::fd::AsFd + std::os::fd::AsRawFd,
+    {
+        ffi::import_paths_with_fd(&self.wrapper, check_sigs, fd.as_raw_fd())
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, paths, callback), err)]
+    fn export_paths<F>(&self, paths: &[StorePath], callback: F) -> Result<(), cxx::Exception>
+    where
+        F: FnMut(&[u8]) -> bool,
+    {
+        let paths = paths.iter().map(|v| v.get_full_path()).collect::<Vec<_>>();
+        let slice = paths.iter().map(|v| v.as_str()).collect::<Vec<_>>();
+        ffi::export_paths(
+            &self.wrapper,
+            &slice,
+            export_paths_trampoline::<F>,
+            std::ptr::addr_of!(callback).cast::<std::ffi::c_void>() as usize,
+        )
+    }
+    #[inline]
+    fn try_resolve_drv(&self, path: &StorePath) -> Option<StorePath> {
+        let v = ffi::try_resolve_drv(&self.wrapper, &path.get_full_path()).ok()?;
+        // An empty string from the C++ side means the derivation could not be
+        // resolved.
+        (!v.is_empty()).then_some(v).map(|v| StorePath::new(&v))
+    }
+}
+
+impl BaseStoreImpl {
+    #[inline]
+    #[tracing::instrument(skip(self, callback, reader), err)]
+    fn import_paths_with_cb<F, S, E>(
+        &self,
+        callback: F,
+        reader: Box<tokio_util::io::StreamReader<S, bytes::Bytes>>,
+        check_sigs: bool,
+    ) -> Result<(), cxx::Exception>
+    where
+        F: FnMut(
+            &tokio::runtime::Runtime,
+            &mut Box<tokio_util::io::StreamReader<S, bytes::Bytes>>,
+            &mut [u8],
+        ) -> usize,
+        S: futures::stream::Stream<Item = Result<bytes::Bytes, E>>,
+        E: Into<std::io::Error>,
+    {
+        let runtime = Box::new(tokio::runtime::Runtime::new().unwrap());
+        ffi::import_paths(
+            &self.wrapper,
+            check_sigs,
+            std::ptr::addr_of!(runtime).cast::<std::ffi::c_void>() as usize,
+            std::ptr::addr_of!(reader).cast::<std::ffi::c_void>() as usize,
+            import_paths_trampoline::<F, S, E>,
+            std::ptr::addr_of!(callback).cast::<std::ffi::c_void>() as usize,
+        )?;
+        drop(reader);
+        drop(runtime);
+        Ok(())
+    }
+}
+
+#[derive(Clone)]
+pub struct LocalStore {
+    base: BaseStoreImpl,
+}
+
+impl LocalStore {
+    #[inline]
+    /// Initialise a new store
+    pub fn init() -> Self {
+        Self {
+            base: BaseStoreImpl::new(ffi::init("")),
+        }
+    }
+
+    pub fn as_base_store(&self) -> &BaseStoreImpl {
+        &self.base
+    }
+}
+
+impl BaseStore for LocalStore {
+    #[inline]
+    async fn is_valid_path(&self, path: StorePath) -> bool {
+        self.base.is_valid_path(path).await
+    }
+
+    #[inline]
+    fn query_path_info(&self, path: &StorePath) -> Option<PathInfo> {
+        self.base.query_path_info(path)
+    }
+
+    #[inline]
+    fn query_path_infos(&self, paths: &[&StorePath]) -> AHashMap<StorePath, PathInfo> {
+        self.base.query_path_infos(paths)
+    }
+
+    #[inline]
+    fn compute_closure_size(&self, path: &StorePath) -> u64 {
+        self.base.compute_closure_size(path)
+    }
+
+    #[inline]
+    fn clear_path_info_cache(&self) {
+        self.base.clear_path_info_cache();
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    fn compute_fs_closure(
+        &self,
+        path: &str,
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+    ) -> Result<Vec<String>, cxx::Exception> {
+        self.base
+            .compute_fs_closure(path, flip_direction, include_outputs, include_derivers)
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    fn compute_fs_closures(
+        &self,
+        paths: &[&str],
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+        toposort: bool,
+    ) -> Result<Vec<StorePath>, cxx::Exception> {
+        self.base.compute_fs_closures(
+            paths,
+            flip_direction,
+            include_outputs,
+            include_derivers,
+            toposort,
+        )
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    async fn query_requisites(
+        &self,
+        drvs: Vec<StorePath>,
+        include_outputs: bool,
+    ) -> Result<Vec<StorePath>, Error> {
+        self.base.query_requisites(drvs, include_outputs).await
+    }
+
+    #[inline]
+    fn get_store_stats(&self) -> Result<StoreStats, cxx::Exception> {
+        self.base.get_store_stats()
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, stream), err)]
+    async fn import_paths<S>(&self, stream: S, check_sigs: bool) -> Result<(), Error>
+    where
+        S: tokio_stream::Stream<Item = std::io::Result<bytes::Bytes>>
+            + Send
+            + Unpin
+            + 'static,
+    {
+        self.base.import_paths::<S>(stream, check_sigs).await
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, fd), err)]
+    fn import_paths_with_fd<Fd>(&self, fd: Fd, check_sigs: bool) -> Result<(), cxx::Exception>
+    where
+        Fd: std::os::fd::AsFd + std::os::fd::AsRawFd,
+    {
+        self.base.import_paths_with_fd(fd, check_sigs)
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, paths, callback), err)]
+    fn export_paths<F>(&self, paths: &[StorePath], callback: F) -> Result<(), cxx::Exception>
+    where
+        F: FnMut(&[u8]) -> bool,
+    {
+        self.base.export_paths(paths, callback)
+    }
+
+    #[inline]
+    fn try_resolve_drv(&self, path: &StorePath) -> Option<StorePath> {
+        self.base.try_resolve_drv(path)
+    }
+}
+
+#[derive(Clone)]
+pub struct RemoteStore {
+    base: BaseStoreImpl,
+
+    pub uri: String,
+    pub base_uri: String,
+}
+
+impl RemoteStore {
+    #[inline]
+    /// Initialise a new store with uri
+    pub fn init(uri: &str) -> Self {
+        let base_uri = url::Url::parse(uri)
+            .ok()
+            .and_then(|v| v.host_str().map(ToOwned::to_owned))
+            .unwrap_or_default();
+
+        Self {
+            base: BaseStoreImpl::new(ffi::init(uri)),
+            uri: uri.into(),
+            base_uri,
+        }
+    }
+
+    pub fn as_base_store(&self) -> &BaseStoreImpl {
+        &self.base
+    }
+
+    #[inline]
+    pub async fn upsert_file(
+        &self,
+        path: String,
+        local_path: std::path::PathBuf,
+        mime_type: &'static str,
+    ) -> Result<(), Error> {
+        let store = self.base.wrapper.clone();
+        asyncify(move || {
+            if let Ok(data) = std::fs::read_to_string(local_path) {
+                ffi::upsert_file(&store, &path, &data, mime_type)?
+            }
+            Ok(())
+        })
+        .await
+    }
+
+    #[inline]
+    pub fn get_s3_stats(&self) -> Result<S3Stats, cxx::Exception> {
+        ffi::get_s3_stats(&self.base.wrapper)
+    }
+
+    #[tracing::instrument(skip(self, paths))]
+    pub async fn query_missing_paths(&self, paths: Vec<StorePath>) -> Vec<StorePath> {
+        use futures::stream::StreamExt as _;
+
+        tokio_stream::iter(paths)
+            .map(|p| async move {
+                if !self.is_valid_path(p.clone()).await {
+                    Some(p)
+                } else {
+                    None
+                }
+            })
+            .buffered(50)
+            .filter_map(|p| async { p })
+            .collect()
+            .await
+    }
+
+    #[tracing::instrument(skip(self, outputs))]
+    pub async fn query_missing_remote_outputs(
+        &self,
+        outputs: Vec<DerivationOutput>,
+    ) -> Vec<DerivationOutput> {
+        use futures::stream::StreamExt as _;
+
+        tokio_stream::iter(outputs)
+            .map(|o| async move {
+                let Some(path) = &o.path else {
+                    return None;
+                };
+                if !self.is_valid_path(path.clone()).await {
+                    Some(o)
+                } else {
+                    None
+                }
+            })
+            .buffered(50)
+            .filter_map(|o| async { o })
+            .collect()
+            .await
+    }
+}
+
+impl BaseStore for RemoteStore {
+    #[inline]
+    async fn is_valid_path(&self, path: StorePath) -> bool {
+        self.base.is_valid_path(path).await
+    }
+
+    #[inline]
+    fn query_path_info(&self, path: &StorePath) -> Option<PathInfo> {
+        self.base.query_path_info(path)
+    }
+
+    #[inline]
+    fn query_path_infos(&self, paths: &[&StorePath]) -> AHashMap<StorePath, PathInfo> {
+        self.base.query_path_infos(paths)
+    }
+
+    #[inline]
+    fn compute_closure_size(&self, path: &StorePath) -> u64 {
+        self.base.compute_closure_size(path)
+    }
+
+    #[inline]
+    fn clear_path_info_cache(&self) {
+        self.base.clear_path_info_cache();
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    fn compute_fs_closure(
+        &self,
+        path: &str,
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+    ) -> Result<Vec<String>, cxx::Exception> {
+        self.base
+            .compute_fs_closure(path, flip_direction, include_outputs, include_derivers)
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    fn compute_fs_closures(
+        &self,
+        paths: &[&str],
+        flip_direction: bool,
+        include_outputs: bool,
+        include_derivers: bool,
+        toposort: bool,
+    ) -> Result<Vec<StorePath>, cxx::Exception> {
+        self.base.compute_fs_closures(
+            paths,
+            flip_direction,
+            include_outputs,
+            include_derivers,
+            toposort,
+        )
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self), err)]
+    async fn query_requisites(
+        &self,
+        drvs: Vec<StorePath>,
+        include_outputs: bool,
+    ) -> Result<Vec<StorePath>, Error> {
+        self.base.query_requisites(drvs, include_outputs).await
+    }
+
+    #[inline]
+    fn get_store_stats(&self) -> Result<StoreStats, cxx::Exception> {
+        self.base.get_store_stats()
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, stream), err)]
+    async fn import_paths<S>(&self, stream: S, check_sigs: bool) -> Result<(), Error>
+    where
+        S: tokio_stream::Stream<Item = std::io::Result<bytes::Bytes>>
+            + Send
+            + Unpin
+            + 'static,
+    {
+        self.base.import_paths::<S>(stream, check_sigs).await
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, fd), err)]
+    fn import_paths_with_fd<Fd>(&self, fd: Fd, check_sigs: bool) -> Result<(), cxx::Exception>
+    where
+        Fd: std::os::fd::AsFd + std::os::fd::AsRawFd,
+    {
+        self.base.import_paths_with_fd(fd, check_sigs)
+    }
+
+    #[inline]
+    #[tracing::instrument(skip(self, paths, callback), err)]
+    fn export_paths<F>(&self, paths: &[StorePath], callback: F) -> Result<(), cxx::Exception>
+    where
+        F: FnMut(&[u8]) -> bool,
+    {
+        self.base.export_paths(paths, callback)
+    }
+
+    #[inline]
+    fn try_resolve_drv(&self, path: &StorePath) -> Option<StorePath> {
+        self.base.try_resolve_drv(path)
+    }
+}
diff --git a/src/hydra-queue-runner-v2/crates/nix-utils/src/nix.cpp b/src/hydra-queue-runner-v2/crates/nix-utils/src/nix.cpp
new file mode 100644
index 000000000..b2318a894
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/nix-utils/src/nix.cpp
@@ -0,0 +1,303 @@
+#include "nix-utils/include/nix.h"
+
+#include <atomic>
+#include <memory>
+#include <string>
+
+#include <nix/main/shared.hh>
+#include <nix/store/binary-cache-store.hh>
+#include <nix/store/globals.hh>
+#include <nix/store/store-api.hh>
+
+#include <nix/store/content-address.hh>
+#include <nix/store/derivations.hh>
+#include <nix/store/s3-binary-cache-store.hh>
+#include <nix/util/serialise.hh>
+
+static std::atomic<bool> initializedNix = false;
+
+#define AS_VIEW(rstr) std::string_view(rstr.data(), rstr.length())
+#define AS_STRING(rstr) std::string(rstr.data(), rstr.length())
+
+static inline rust::String
+extract_opt_path(const nix::Store &store,
+                 const std::optional<nix::StorePath> &v) {
+  // TODO(conni2461): Replace with option
+  return v ? store.printStorePath(*v) : "";
+}
+
+static inline rust::Vec<rust::String>
+extract_path_set(const nix::Store &store, const nix::StorePathSet &set) {
+  rust::Vec<rust::String> data;
+  data.reserve(set.size());
+  for (const nix::StorePath &path : set) {
+    data.emplace_back(store.printStorePath(path));
+  }
+  return data;
+}
+
+static inline rust::Vec<rust::String>
+extract_paths(const nix::Store &store, const nix::StorePaths &set) {
+  rust::Vec<rust::String> data;
+  data.reserve(set.size());
+  for (const nix::StorePath &path : set) {
+    data.emplace_back(store.printStorePath(path));
+  }
+  return data;
+}
+
+namespace nix_utils {
+StoreWrapper::StoreWrapper(nix::ref<nix::Store> _store) : _store(_store) {}
+
+std::shared_ptr<StoreWrapper> init(rust::Str uri) {
+  if (!initializedNix) {
+    initializedNix = true;
+    nix::initNix();
+  }
+  if (uri.empty()) {
+    nix::ref<nix::Store> _store = nix::openStore();
+    return std::make_shared<StoreWrapper>(_store);
+  } else {
+    nix::ref<nix::Store> _store = nix::openStore(AS_STRING(uri));
+    return std::make_shared<StoreWrapper>(_store);
+  }
+}
+
+rust::String get_nix_prefix() { return nix::settings.nixPrefix; }
+rust::String get_store_dir() { return nix::settings.nixStore; }
+rust::String get_log_dir() { return nix::settings.nixLogDir; }
+rust::String get_state_dir() { return nix::settings.nixStateDir; }
+rust::String get_this_system() { return nix::settings.thisSystem.get(); }
+rust::Vec<rust::String> get_extra_platforms() {
+  auto set = nix::settings.extraPlatforms.get();
+  rust::Vec<rust::String> data;
+  data.reserve(set.size());
+  for (const auto &val : set) {
+    data.emplace_back(val);
+  }
+  return data;
+}
+rust::Vec<rust::String> get_system_features() {
+  auto set = nix::settings.systemFeatures.get();
+  rust::Vec<rust::String> data;
+  data.reserve(set.size());
+  for (const auto &val : set) {
+    data.emplace_back(val);
+  }
+  return data;
+}
+bool get_use_cgroups() {
+#ifdef __linux__
+  return nix::settings.useCgroups;
+#endif
+  return false;
+}
+void set_verbosity(int32_t level) { nix::verbosity = (nix::Verbosity)level; }
+
+bool is_valid_path(const StoreWrapper &wrapper, rust::Str path) {
+  auto store = wrapper._store;
+  return store->isValidPath(store->parseStorePath(AS_VIEW(path)));
+}
+
+InternalPathInfo query_path_info(const StoreWrapper &wrapper, rust::Str path) {
+  auto store = wrapper._store;
+  auto info = store->queryPathInfo(store->parseStorePath(AS_VIEW(path)));
+
+  std::string narhash = info->narHash.to_string(nix::HashFormat::Nix32, true);
+
+  rust::Vec<rust::String> refs = extract_path_set(*store, info->references);
+
+  rust::Vec<rust::String> sigs;
+  sigs.reserve(info->sigs.size());
+  for (const std::string &sig : info->sigs) {
+    sigs.push_back(sig);
+  }
+
+  // TODO(conni2461): Replace "" with option
+  return InternalPathInfo{
+      extract_opt_path(*store, info->deriver),
+      narhash,
+      info->registrationTime,
+      info->narSize,
+      refs,
+      sigs,
+      info->ca ? nix::renderContentAddress(*info->ca) : "",
+  };
+}
+
+uint64_t compute_closure_size(const StoreWrapper &wrapper, rust::Str path) {
+  auto store = wrapper._store;
+  nix::StorePathSet closure;
+  store->computeFSClosure(store->parseStorePath(AS_VIEW(path)), closure, false,
+                          false);
+
+  uint64_t totalNarSize = 0;
+  for (auto &p : closure) {
+    totalNarSize += store->queryPathInfo(p)->narSize;
+  }
+  return totalNarSize;
+}
+
+void clear_path_info_cache(const StoreWrapper &wrapper) {
+  auto store = wrapper._store;
+  store->clearPathInfoCache();
+}
+
+rust::Vec<rust::String> compute_fs_closure(const StoreWrapper &wrapper,
+                                           rust::Str path, bool flip_direction,
+                                           bool include_outputs,
+                                           bool include_derivers) {
+  auto store = wrapper._store;
+  nix::StorePathSet path_set;
+  store->computeFSClosure(store->parseStorePath(AS_VIEW(path)), path_set,
+                          flip_direction, include_outputs, include_derivers);
+  return extract_path_set(*store, path_set);
+}
+
+rust::Vec<rust::String> compute_fs_closures(const StoreWrapper &wrapper,
+                                            rust::Slice<const rust::Str> paths,
+                                            bool flip_direction,
+                                            bool include_outputs,
+                                            bool include_derivers,
+                                            bool toposort) {
+  auto store = wrapper._store;
+  nix::StorePathSet path_set;
+  for (auto &path : paths) {
+    store->computeFSClosure(store->parseStorePath(AS_VIEW(path)), path_set,
+                            flip_direction, include_outputs, include_derivers);
+  }
+  if (toposort) {
+    auto sorted = store->topoSortPaths(path_set);
+    return extract_paths(*store, sorted);
+  } else {
+    return extract_path_set(*store, path_set);
+  }
+}
+
+void upsert_file(const StoreWrapper &wrapper, rust::Str path, rust::Str data,
+                 rust::Str mime_type) {
+  auto store = wrapper._store.dynamic_pointer_cast<nix::BinaryCacheStore>();
+  if (!store) {
+    throw nix::Error("Not a binary cache store");
+  }
+  store->upsertFile(AS_STRING(path), AS_STRING(data), AS_STRING(mime_type));
+}
+
+StoreStats get_store_stats(const StoreWrapper &wrapper) {
+  auto store = wrapper._store;
+  auto &stats = store->getStats();
+  return StoreStats{
+      stats.narInfoRead.load(),
+      stats.narInfoReadAverted.load(),
+      stats.narInfoMissing.load(),
+      stats.narInfoWrite.load(),
+      stats.pathInfoCacheSize.load(),
+      stats.narRead.load(),
+      stats.narReadBytes.load(),
+      stats.narReadCompressedBytes.load(),
+      stats.narWrite.load(),
+      stats.narWriteAverted.load(),
+      stats.narWriteBytes.load(),
+      stats.narWriteCompressedBytes.load(),
+      stats.narWriteCompressionTimeMs.load(),
+  };
+}
+
+S3Stats get_s3_stats(const StoreWrapper &wrapper) {
+  auto store = wrapper._store;
+  auto s3Store = dynamic_cast<nix::S3BinaryCacheStore *>(&*store);
+  if (!s3Store) {
+    throw nix::Error("Not an S3 binary cache store");
+  }
+  auto &stats = s3Store->getS3Stats();
+  return S3Stats{
+      stats.put.load(),  stats.putBytes.load(), stats.putTimeMs.load(),
+      stats.get.load(),  stats.getBytes.load(), stats.getTimeMs.load(),
+      stats.head.load(),
+  };
+}
+
+void copy_paths(const StoreWrapper &src_store, const StoreWrapper &dst_store,
+                rust::Slice<const rust::Str> paths, bool repair,
+                bool check_sigs, bool substitute) {
+  nix::StorePathSet path_set;
+  for (auto &path : paths) {
+    path_set.insert(src_store._store->parseStorePath(AS_VIEW(path)));
+  }
+  nix::copyPaths(*src_store._store, *dst_store._store, path_set,
+                 repair ? nix::Repair : nix::NoRepair,
+                 check_sigs ? nix::CheckSigs : nix::NoCheckSigs,
+                 substitute ? nix::Substitute : nix::NoSubstitute);
+}
+
+void import_paths(
+    const StoreWrapper &wrapper, bool check_sigs, size_t runtime, size_t reader,
+    rust::Fn<size_t(rust::Slice<uint8_t>, size_t, size_t, size_t)> callback,
+    size_t user_data) {
+  nix::LambdaSource source([=](char *out, size_t out_len) {
+    auto data = rust::Slice<uint8_t>((uint8_t *)out, out_len);
+    size_t ret = (*callback)(data, runtime, reader, user_data);
+    if (!ret) {
+      throw nix::EndOfFile("End of stream reached");
+    }
+    return ret;
+  });
+
+  auto store = wrapper._store;
+  auto paths = store->importPaths(source, check_sigs ? nix::CheckSigs
+                                                     : nix::NoCheckSigs);
+}
+
+void import_paths_with_fd(const StoreWrapper &wrapper, bool check_sigs,
+                          int32_t fd) {
+  nix::FdSource source(fd);
+
+  auto store = wrapper._store;
+  store->importPaths(source, check_sigs ? nix::CheckSigs : nix::NoCheckSigs);
+}
+
+class StopExport : public std::exception {
+public:
+  const char *what() const noexcept override { return "Stop exporting nar"; }
+};
+
+void export_paths(const StoreWrapper &wrapper,
+                  rust::Slice<const rust::Str> paths,
+                  rust::Fn<bool(rust::Slice<const uint8_t>, size_t)> callback,
+                  size_t user_data) {
+  nix::LambdaSink sink([=](std::string_view v) {
+    auto data = rust::Slice<const uint8_t>((const uint8_t *)v.data(), v.size());
+    bool ret = (*callback)(data, user_data);
+    if (!ret) {
+      throw StopExport();
+    }
+  });
+
+  auto store = wrapper._store;
+  nix::StorePathSet path_set;
+  for (auto &path : paths) {
+    path_set.insert(store->followLinksToStorePath(AS_VIEW(path)));
+  }
+  try {
+    store->exportPaths(path_set, sink);
+  } catch (StopExport &e) {
+    // Intentionally do nothing. We're only using the exception as a
+    // short-circuiting mechanism.
+  }
+}
+
+rust::String try_resolve_drv(const StoreWrapper &wrapper, rust::Str path) {
+  auto store = wrapper._store;
+
+  auto drv = store->readDerivation(store->parseStorePath(AS_VIEW(path)));
+  auto resolved = drv.tryResolve(*store);
+  if (!resolved) {
+    return "";
+  }
+
+  auto resolved_path = writeDerivation(*store, *resolved, nix::NoRepair, false);
+  // TODO: return drv not drv path
+  return extract_opt_path(*store, resolved_path);
+}
+} // namespace nix_utils
diff --git a/src/hydra-queue-runner-v2/crates/shared/Cargo.toml b/src/hydra-queue-runner-v2/crates/shared/Cargo.toml
new file mode 100644
index 000000000..33fd34425
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/shared/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "shared"
+version = "0.1.0"
+edition = "2024"
+license = "GPL-3.0"
+
+[dependencies]
+tracing = "0.1"
+anyhow = "1.0.98"
+tokio = { version = "1.34", features = ["full"] }
+regex = "1"
+sha2 = "0.10"
+
+nix-utils = { path = "../nix-utils" }
diff --git a/src/hydra-queue-runner-v2/crates/shared/src/lib.rs b/src/hydra-queue-runner-v2/crates/shared/src/lib.rs
new file mode 100644
index 000000000..597c1b7ed
--- /dev/null
+++ b/src/hydra-queue-runner-v2/crates/shared/src/lib.rs
@@ -0,0 +1,232 @@
+use std::{os::unix::fs::MetadataExt as _, sync::LazyLock};
+
+use sha2::{Digest as _, Sha256};
+use tokio::io::{AsyncBufReadExt as _, AsyncReadExt as _, BufReader};
+
+use nix_utils::StorePath;
+
+static VALIDATE_METRICS_NAME: LazyLock<regex::Regex> =
+    LazyLock::new(|| regex::Regex::new("[a-zA-Z0-9._-]+").expect("Failed to compile regex"));
+static VALIDATE_METRICS_UNIT: LazyLock<regex::Regex> =
+    LazyLock::new(|| regex::Regex::new("[a-zA-Z0-9._%-]+").expect("Failed to compile regex"));
+static VALIDATE_RELEASE_NAME: LazyLock<regex::Regex> =
+    LazyLock::new(|| regex::Regex::new("[a-zA-Z0-9.@:_-]+").expect("Failed to compile regex"));
+static VALIDATE_PRODUCT_NAME: LazyLock<regex::Regex> =
+    LazyLock::new(|| regex::Regex::new("[a-zA-Z0-9.@:_ -]*").expect("Failed to compile regex"));
+
+pub struct BuildProduct {
+    pub path: String,
+    pub default_path: String,
+
+    pub r#type: String,
+    pub subtype: String,
+    pub name: String,
+
+    pub is_regular: bool,
+
+    pub sha256hash: Option<String>,
+    pub file_size: Option<u64>,
+}
+
+pub struct BuildMetric {
+    pub path: String,
+    pub name: String,
+    pub unit: Option<String>,
+    pub value: f64,
+}
+
+pub struct NixSupport {
+    pub failed: bool,
+    pub hydra_release_name: Option<String>,
+    pub metrics: Vec<BuildMetric>,
+    pub products: Vec<BuildProduct>,
+}
+
+pub async fn parse_nix_support_from_outputs(
+    derivation_outputs: &[nix_utils::DerivationOutput],
+) -> anyhow::Result<NixSupport> {
+    let mut metrics = Vec::new();
+    let mut failed = false;
+    let mut hydra_release_name = None;
+
+    let outputs = derivation_outputs
+        .iter()
+        .filter_map(|o| o.path.as_ref())
+        .collect::<Vec<_>>();
+    for output in &outputs {
+        let output_full_path = output.get_full_path();
+        let file_path = std::path::Path::new(&output_full_path).join("nix-support/hydra-metrics");
+        let Ok(file) = tokio::fs::File::open(&file_path).await else {
+            continue;
+        };
+
+        let reader = BufReader::new(file);
+        let mut lines = reader.lines();
+
+        while let Some(line) = lines.next_line().await? {
+            let fields: Vec<String> = line.split_whitespace().map(ToOwned::to_owned).collect();
+            if fields.len() < 2 || !VALIDATE_METRICS_NAME.is_match(&fields[0]) {
+                continue;
+            }
+
+            metrics.push(BuildMetric {
+                path: output_full_path.clone(),
+                name: fields[0].clone(),
+                value: fields[1].parse::<f64>().unwrap_or(0.0),
+                unit: if fields.len() >= 3 && VALIDATE_METRICS_UNIT.is_match(&fields[2]) {
+                    Some(fields[2].clone())
+                } else {
+                    None
+                },
+            });
+        }
+    }
+
+    for output in &outputs {
+        let file_path = std::path::Path::new(&output.get_full_path()).join("nix-support/failed");
+        if tokio::fs::try_exists(file_path).await.unwrap_or_default() {
+            failed = true;
+            break;
+        }
+    }
+
+    for output in &outputs {
+        let file_path =
+            std::path::Path::new(&output.get_full_path()).join("nix-support/hydra-release-name");
+        if let Ok(v) = tokio::fs::read_to_string(file_path).await {
+            let v = v.trim();
+            if !v.is_empty() && VALIDATE_RELEASE_NAME.is_match(v) {
+                hydra_release_name = Some(v.to_owned());
+                break;
+            }
+        }
+    }
+
+    let regex = regex::Regex::new(
+        r#"([a-zA-Z0-9_-]+)\s+([a-zA-Z0-9_-]+)\s+(\"[^\"]+\"|[^\"\s<>]+)(\s+([^\"\s<>]+))?"#,
+    )?;
+    let mut explicit_products = false;
+    let mut products = Vec::new();
+    for output in &outputs {
+        let output_full_path = output.get_full_path();
+        let file_path =
+            std::path::Path::new(&output_full_path).join("nix-support/hydra-build-products");
+        let Ok(file) = tokio::fs::File::open(&file_path).await else {
+            continue;
+        };
+
+        explicit_products = true;
+
+        let reader = BufReader::new(file);
+        let mut lines = reader.lines();
+        while let Some(line) = lines.next_line().await?
{ + let Some(captures) = regex.captures(&line) else { + continue; + }; + + let s = captures[3].to_string(); + let path = if s.starts_with('"') && s.ends_with('"') { + s[1..s.len() - 1].to_string() + } else { + s + }; + + if path.is_empty() || !path.starts_with('/') { + continue; + } + let path = StorePath::new(&path); + let path_full_path = path.get_full_path(); + if !nix_utils::check_if_storepath_exists(&path).await { + continue; + } + let Ok(metadata) = tokio::fs::metadata(&path_full_path).await else { + continue; + }; + let is_regular = metadata.is_file(); + + let name = { + let name = if &path == *output { + String::new() + } else { + std::path::Path::new(&path_full_path) + .file_name() + .and_then(|f| f.to_str()) + .map(ToOwned::to_owned) + .unwrap_or_default() + }; + + if VALIDATE_PRODUCT_NAME.is_match(&name) { + name + } else { + "".into() + } + }; + + let sha256hash = if is_regular { + let mut file = tokio::fs::File::open(&path_full_path).await?; + let mut sha256 = Sha256::new(); + + let mut buffer = Vec::new(); + file.read_to_end(&mut buffer).await?; + sha256.update(&buffer); + + Some(format!("{:x}", sha256.finalize())) + } else { + None + }; + + products.push(BuildProduct { + r#type: captures[1].to_string(), + subtype: captures[2].to_string(), + path: path_full_path, + default_path: captures + .get(5) + .map(|m| m.as_str().to_string()) + .unwrap_or_default(), + name, + is_regular, + file_size: if is_regular { + Some(metadata.size()) + } else { + None + }, + sha256hash, + }); + } + } + + if !explicit_products { + for o in derivation_outputs { + let Some(path) = &o.path else { + continue; + }; + let full_path = path.get_full_path(); + let Ok(metadata) = tokio::fs::metadata(&full_path).await else { + continue; + }; + if metadata.is_dir() { + products.push(BuildProduct { + r#type: "nix-build".to_string(), + subtype: if o.name == "out" { + String::new() + } else { + o.name.clone() + }, + path: full_path, + name: path.name().to_string(), + default_path: String::new(), + is_regular: false, + file_size: None, + sha256hash: None, + }); + } + } + } + + Ok(NixSupport { + metrics, + failed, + hydra_release_name, + products, + }) +} diff --git a/src/hydra-queue-runner-v2/proto/v1/streaming.proto b/src/hydra-queue-runner-v2/proto/v1/streaming.proto new file mode 100644 index 000000000..6add0c9e6 --- /dev/null +++ b/src/hydra-queue-runner-v2/proto/v1/streaming.proto @@ -0,0 +1,214 @@ +syntax = "proto3"; + +package runner.v1; + +option java_multiple_files = true; +option java_outer_classname = "RunnerProto"; +option java_package = "io.grpc.hydra.runner"; + +service RunnerService { + rpc OpenTunnel(stream BuilderRequest) returns (stream RunnerRequest) {} + rpc BuildLog(stream LogChunk) returns (Empty) {} + rpc BuildResult(stream NarData) returns (Empty) {} + rpc BuildStepUpdate(StepUpdate) returns (Empty) {} + rpc CompleteBuild(BuildResultInfo) returns (Empty) {} + rpc FetchDrvRequisites(FetchRequisitesRequest) returns (DrvRequisitesMessage) {} + rpc StreamFile(StorePath) returns (stream NarData) {} + rpc StreamFiles(StorePaths) returns (stream NarData) {} +} + +message Empty {} + +message BuilderRequest { + oneof message { + JoinMessage join = 1; + PingMessage ping = 2; + } +} + +message JoinMessage { + string machine_id = 1; + repeated string systems = 2; + string hostname = 3; + uint32 cpu_count = 4; + float bogomips = 5; + float speed_factor = 6; + uint32 max_jobs = 7; + float tmp_avail_threshold = 8; + float store_avail_threshold = 9; + float load1_threshold = 10; + float cpu_psi_threshold = 
11; + float mem_psi_threshold = 12; + optional float io_psi_threshold = 13; + uint64 total_mem = 14; + repeated string supported_features = 15; + repeated string mandatory_features = 16; + bool cgroups = 17; +} + +message Pressure { + float avg10 = 1; + float avg60 = 2; + float avg300 = 3; + uint64 total = 4; +} + +message PressureState { + Pressure cpu_some = 1; + Pressure mem_some = 2; + Pressure mem_full = 3; + Pressure io_some = 4; + Pressure io_full = 5; + Pressure irq_full = 6; +} + +message PingMessage { + string machine_id = 1; + float load1 = 2; + float load5 = 3; + float load15 = 4; + uint64 mem_usage = 5; + optional PressureState pressure = 6; + double tmp_free_percent = 7; + double store_free_percent = 8; +} + +message SimplePingMessage { + string message = 1; +} + +message RunnerRequest { + oneof message { + JoinResponse join = 1; + SimplePingMessage ping = 2; + BuildMessage build = 3; + AbortMessage abort = 4; + } +} + +message JoinResponse { + string machine_id = 1; + uint32 max_concurrent_downloads = 2; +} + +message BuildMessage { + string drv = 1; + optional string resolved_drv = 2; + uint64 max_log_size = 3; + int32 max_silent_time = 4; + int32 build_timeout = 5; + // bool is_deterministic = 5; + // bool enforce_determinism = 6; +} + +message DrvRequisitesMessage { + repeated string requisites = 1; +} + +message AbortMessage { + string drv = 1; +} + +message LogChunk { + string drv = 1; + bytes data = 2; +} + +message FetchRequisitesRequest { + string path = 1; + bool include_outputs = 2; +} + +message StorePath { + string path = 1; +} + +message StorePaths { + repeated string paths = 1; +} + +message NarData { + bytes chunk = 1; +} + +message OutputNameOnly { + string name = 1; +} + +message OutputWithPath { + string name = 1; + string path = 2; + uint64 closure_size = 3; + uint64 nar_size = 4; + string nar_hash = 5; +} + +message Output { + oneof output { + OutputNameOnly nameonly = 1; + OutputWithPath withpath = 2; + } +} + +message BuildMetric { + string path = 1; + string name = 2; + optional string unit = 3; + double value = 4; +} + +message BuildProduct { + string path = 1; + string default_path = 2; + + string type = 3; + string subtype = 4; + string name = 5; + + bool is_regular = 6; + + optional string sha256hash = 7; + optional uint64 file_size = 8; +} + +message NixSupport { + bool failed = 1; + optional string hydra_release_name = 2; + repeated BuildMetric metrics = 3; + repeated BuildProduct products = 4; +} + +enum StepStatus { + Preparing = 0; + Connecting = 1; + SeningInputs = 2; + Building = 3; + WaitingForLocalSlot = 4; + ReceivingOutputs = 5; + PostProcessing = 6; +} + +message StepUpdate { + string machine_id = 1; + string drv = 2; + StepStatus step_status = 3; +} + +enum BuildResultState { + BuildFailure = 0; + Success = 1; + PreparingFailure = 2; + ImportFailure = 3; + UploadFailure = 4; + PostProcessingFailure = 5; +} + +message BuildResultInfo { + string machine_id = 1; + string drv = 2; + uint64 import_time_ms = 3; + uint64 build_time_ms = 4; + BuildResultState result_state = 5; + NixSupport nix_support = 6; + repeated Output outputs = 7; +} diff --git a/src/hydra-queue-runner-v2/queue-runner/Cargo.toml b/src/hydra-queue-runner-v2/queue-runner/Cargo.toml new file mode 100644 index 000000000..ebd8066f0 --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "queue-runner" +version = "0.1.0" +edition = "2024" +license = "GPL-3.0" + +[dependencies] +log = "0.4" +tracing = "0.1" +tracing-subscriber = 
{ version = "0.3.18", features = [
+    "registry",
+    "env-filter",
+] }
+tracing-log = "0.2.0"
+console-subscriber = { version = "0.4.1", optional = true }
+
+sd-notify = "0.4.5"
+lockfile = "0.4.0"
+toml = "0.9.0"
+serde = { version = "1.0", features = ["derive"] }
+serde_json = "1.0"
+secrecy = { version = "0.10.3", features = ["serde"] }
+ahash = "0.8.11"
+arc-swap = "1.7.1"
+parking_lot = "0.12.4"
+
+thiserror = "2.0"
+anyhow = "1.0.98"
+clap = { version = "4", features = ["derive", "env"] }
+uuid = { version = "1.16", features = ["v4", "serde"] }
+atomic_float = "1.1.0"
+
+tokio = { version = "1.34", features = ["full"] }
+futures = "0.3"
+futures-util = "0.3"
+byte-unit = "5.1.6"
+
+tokio-stream = "0.1"
+prost = "0.14"
+tonic = { version = "0.14", features = ["zstd", "tls-ring"] }
+tonic-reflection = "0.14"
+tonic-prost = "0.14"
+listenfd = "1"
+async-stream = "0.3"
+h2 = "0.4"
+
+hyper = { version = "1", features = ["full"] }
+http-body-util = "0.1"
+hyper-util = "0.1.10"
+bytes = "1"
+chrono = { version = "0.4.38", default-features = false, features = [
+    "clock",
+    "std",
+    "serde",
+] }
+prometheus = "0.14.0"
+procfs = "0.18"
+
+db = { path = "../crates/db" }
+nix-utils = { path = "../crates/nix-utils" }
+shared = { path = "../crates/shared" }
+
+[target.'cfg(not(target_env = "msvc"))'.dependencies]
+tikv-jemallocator = "0.6"
+
+[build-dependencies]
+tonic-prost-build = "0.14"
+
+[features]
+tokio-console = ["console-subscriber"]
diff --git a/src/hydra-queue-runner-v2/queue-runner/build.rs b/src/hydra-queue-runner-v2/queue-runner/build.rs
new file mode 100644
index 000000000..2c52bf8ce
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/build.rs
@@ -0,0 +1,9 @@
+use std::{env, path::PathBuf};
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+    let out_dir = PathBuf::from(env::var("OUT_DIR")?);
+    tonic_prost_build::configure()
+        .file_descriptor_set_path(out_dir.join("streaming_descriptor.bin"))
+        .compile_protos(&["../proto/v1/streaming.proto"], &["../proto"])?;
+    Ok(())
+}
diff --git a/src/hydra-queue-runner-v2/queue-runner/src/config.rs b/src/hydra-queue-runner-v2/queue-runner/src/config.rs
new file mode 100644
index 000000000..76c894f8a
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/src/config.rs
@@ -0,0 +1,513 @@
+use std::{net::SocketAddr, sync::Arc};
+
+use clap::Parser;
+use tracing_subscriber::{Layer as _, Registry, layer::SubscriberExt as _};
+
+#[cfg(feature = "tokio-console")]
+pub fn init_tracing() -> anyhow::Result<
+    tracing_subscriber::reload::Handle<tracing_subscriber::EnvFilter, Registry>,
+> {
+    tracing_log::LogTracer::init()?;
+    let (log_env_filter, reload_handle) = tracing_subscriber::reload::Layer::new(
+        tracing_subscriber::EnvFilter::try_from_default_env()
+            .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
+    );
+    let fmt_layer = tracing_subscriber::fmt::layer()
+        .compact()
+        .with_filter(log_env_filter);
+
+    let console_layer = console_subscriber::spawn();
+    let subscriber = Registry::default().with(fmt_layer).with(console_layer);
+    tracing::subscriber::set_global_default(subscriber)?;
+    Ok(reload_handle)
+}
+
+#[cfg(not(feature = "tokio-console"))]
+pub fn init_tracing() -> anyhow::Result<
+    tracing_subscriber::reload::Handle<tracing_subscriber::EnvFilter, Registry>,
+> {
+    tracing_log::LogTracer::init()?;
+    let (log_env_filter, reload_handle) = tracing_subscriber::reload::Layer::new(
+        tracing_subscriber::EnvFilter::try_from_default_env()
+            .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
+    );
+    let fmt_layer = tracing_subscriber::fmt::layer()
+        .compact()
+        .with_filter(log_env_filter);
+
+    let subscriber = Registry::default().with(fmt_layer);
+    tracing::subscriber::set_global_default(subscriber)?;
+    Ok(reload_handle)
+}
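+
+// Illustration only (not part of the original file): the handle returned by
+// `init_tracing` can swap the active `EnvFilter` at runtime, e.g. from a
+// signal handler. The directive string below is an assumed example.
+//
+//     let handle = init_tracing()?;
+//     handle.reload(tracing_subscriber::EnvFilter::new("queue_runner=debug"))?;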
+
+#[derive(Debug, Clone)]
+pub enum BindSocket {
+    Tcp(SocketAddr),
+    Unix(std::path::PathBuf),
+    ListenFd,
+}
+
+impl std::str::FromStr for BindSocket {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        s.parse::<SocketAddr>()
+            .map(BindSocket::Tcp)
+            .or_else(|_| {
+                if s == "-" {
+                    Ok(BindSocket::ListenFd)
+                } else {
+                    Ok(BindSocket::Unix(s.into()))
+                }
+            })
+    }
+}
+
+#[derive(Parser, Debug)]
+#[clap(author, version, about, long_about = None)]
+pub struct Args {
+    /// Query the queue runner status
+    #[clap(long)]
+    pub status: bool,
+
+    /// REST server bind
+    #[clap(short, long, default_value = "[::1]:8080")]
+    pub rest_bind: SocketAddr,
+
+    /// GRPC server bind, either a `SocketAddr`, a Path for a Unix Socket or `-` to use `ListenFD` (systemd socket activation)
+    #[clap(short, long, default_value = "[::1]:50051")]
+    pub grpc_bind: BindSocket,
+
+    /// Config path
+    #[clap(short, long, default_value = "config.toml")]
+    pub config_path: String,
+
+    /// Path to Server cert
+    #[clap(long)]
+    pub server_cert_path: Option<std::path::PathBuf>,
+
+    /// Path to Server key
+    #[clap(long)]
+    pub server_key_path: Option<std::path::PathBuf>,
+
+    /// Path to Client ca cert
+    #[clap(long)]
+    pub client_ca_cert_path: Option<std::path::PathBuf>,
+}
+
+impl Args {
+    pub fn new() -> Self {
+        Self::parse()
+    }
+
+    pub fn mtls_enabled(&self) -> bool {
+        self.server_cert_path.is_some()
+            && self.server_key_path.is_some()
+            && self.client_ca_cert_path.is_some()
+    }
+
+    pub fn mtls_configured_correctly(&self) -> bool {
+        self.mtls_enabled()
+            || (self.server_cert_path.is_none()
+                && self.server_key_path.is_none()
+                && self.client_ca_cert_path.is_none())
+    }
+
+    pub async fn get_mtls(
+        &self,
+    ) -> anyhow::Result<(tonic::transport::Certificate, tonic::transport::Identity)> {
+        let server_cert_path = self
+            .server_cert_path
+            .as_deref()
+            .ok_or(anyhow::anyhow!("server_cert_path not provided"))?;
+        let server_key_path = self
+            .server_key_path
+            .as_deref()
+            .ok_or(anyhow::anyhow!("server_key_path not provided"))?;
+
+        let client_ca_cert_path = self
+            .client_ca_cert_path
+            .as_deref()
+            .ok_or(anyhow::anyhow!("client_ca_cert_path not provided"))?;
+        let client_ca_cert = tokio::fs::read_to_string(client_ca_cert_path).await?;
+        let client_ca_cert = tonic::transport::Certificate::from_pem(client_ca_cert);
+
+        let server_cert = tokio::fs::read_to_string(server_cert_path).await?;
+        let server_key = tokio::fs::read_to_string(server_key_path).await?;
+        let server_identity = tonic::transport::Identity::from_pem(server_cert, server_key);
+        Ok((client_ca_cert, server_identity))
+    }
+}
+
+fn default_data_dir() -> std::path::PathBuf {
+    "/tmp/hydra".into()
+}
+
+fn default_pg_socket_url() -> secrecy::SecretString {
+    "postgres://hydra@%2Frun%2Fpostgresql:5432/hydra".into()
+}
+
+fn default_max_db_connections() -> u32 {
+    128
+}
+
+fn default_dispatch_trigger_timer_in_s() -> i64 {
+    120
+}
+
+fn default_queue_trigger_timer_in_s() -> i64 {
+    -1
+}
+
+fn default_max_tries() -> u32 {
+    5
+}
+
+fn default_retry_interval() -> u32 {
+    60
+}
+
+fn default_retry_backoff() -> f32 {
+    3.0
+}
+
+fn default_max_unsupported_time_in_s() -> i64 {
+    120
+}
+
+fn default_stop_queue_run_after_in_s() -> i64 {
+    60
+}
+
+fn default_max_concurrent_downloads() -> u32 {
+    5
+}
+
+fn default_concurrent_upload_limit() -> usize {
+    5
+}
+
+#[derive(Debug, Default, serde::Deserialize, Copy, Clone, PartialEq, Eq)]
+pub enum MachineSortFn {
+    SpeedFactorOnly,
+    CpuCoreCountWithSpeedFactor,
+    #[default]
+    BogomipsWithSpeedFactor,
+}
+
+#[derive(Debug, Default, serde::Deserialize, Copy, Clone, PartialEq, Eq)]
+pub enum MachineFreeFn {
+    Dynamic,
+    DynamicWithMaxJobLimit,
+    #[default]
+    Static,
+}
+
+/// Main configuration of the application
+#[derive(Debug, serde::Deserialize)]
+#[serde(deny_unknown_fields)]
+#[serde(rename_all = "camelCase")]
+struct AppConfig {
+    #[serde(default = "default_data_dir")]
+    hydra_data_dir: std::path::PathBuf,
+
+    #[serde(default = "default_pg_socket_url")]
+    db_url: secrecy::SecretString,
+
+    #[serde(default = "default_max_db_connections")]
+    max_db_connections: u32,
+
+    #[serde(default)]
+    machine_sort_fn: MachineSortFn,
+
+    #[serde(default)]
+    machine_free_fn: MachineFreeFn,
+
+    // setting this to -1 will disable the timer
+    #[serde(default = "default_dispatch_trigger_timer_in_s")]
+    dispatch_trigger_timer_in_s: i64,
+
+    // setting this to -1 will disable the timer
+    #[serde(default = "default_queue_trigger_timer_in_s")]
+    queue_trigger_timer_in_s: i64,
+
+    #[serde(default)]
+    remote_store_addr: Vec<String>,
+
+    #[serde(default)]
+    use_substitutes: bool,
+
+    roots_dir: Option<std::path::PathBuf>,
+
+    #[serde(default = "default_max_tries")]
+    max_retries: u32,
+
+    #[serde(default = "default_retry_interval")]
+    retry_interval: u32,
+
+    #[serde(default = "default_retry_backoff")]
+    retry_backoff: f32,
+
+    #[serde(default = "default_max_unsupported_time_in_s")]
+    max_unsupported_time_in_s: i64,
+
+    #[serde(default = "default_stop_queue_run_after_in_s")]
+    stop_queue_run_after_in_s: i64,
+
+    #[serde(default = "default_max_concurrent_downloads")]
+    max_concurrent_downloads: u32,
+
+    #[serde(default = "default_concurrent_upload_limit")]
+    concurrent_upload_limit: usize,
+}
+
+/// Prepared configuration of the application
+#[derive(Debug)]
+pub struct PreparedApp {
+    #[allow(dead_code)]
+    hydra_data_dir: std::path::PathBuf,
+    hydra_log_dir: std::path::PathBuf,
+    lockfile: std::path::PathBuf,
+    pub db_url: secrecy::SecretString,
+    max_db_connections: u32,
+    pub machine_sort_fn: MachineSortFn,
+    machine_free_fn: MachineFreeFn,
+    dispatch_trigger_timer: Option<tokio::time::Duration>,
+    queue_trigger_timer: Option<tokio::time::Duration>,
+    pub remote_store_addr: Vec<String>,
+    use_substitutes: bool,
+    roots_dir: std::path::PathBuf,
+    max_retries: u32,
+    retry_interval: f32,
+    retry_backoff: f32,
+    max_unsupported_time: chrono::Duration,
+    stop_queue_run_after: Option<chrono::Duration>,
+    max_concurrent_downloads: u32,
+    concurrent_upload_limit: usize,
+}
+
+impl TryFrom<AppConfig> for PreparedApp {
+    type Error = anyhow::Error;
+
+    fn try_from(val: AppConfig) -> Result<Self, Self::Error> {
+        let remote_store_addr = val
+            .remote_store_addr
+            .into_iter()
+            .filter(|v| {
+                v.starts_with("file://")
+                    || v.starts_with("s3://")
+                    || v.starts_with("ssh://")
+                    || v.starts_with('/')
+            })
+            .collect();
+
+        let logname = std::env::var("LOGNAME").expect("LOGNAME not set");
+        let nix_state_dir = std::env::var("NIX_STATE_DIR").unwrap_or("/nix/var/nix/".to_owned());
+        let roots_dir = if let Some(roots_dir) = val.roots_dir {
+            roots_dir
+        } else {
+            std::path::PathBuf::from(nix_state_dir)
+                .join("gcroots/per-user")
+                .join(logname)
+                .join("hydra-roots")
+        };
+        std::fs::create_dir_all(&roots_dir)?;
+
+        let hydra_log_dir = val.hydra_data_dir.join("build-logs");
+        let lockfile = val.hydra_data_dir.join("queue-runner/lock");
+
+        Ok(Self {
+            hydra_data_dir: val.hydra_data_dir,
+            hydra_log_dir,
+            lockfile,
+            db_url: val.db_url,
+            max_db_connections: val.max_db_connections,
+            machine_sort_fn: val.machine_sort_fn,
+            machine_free_fn: val.machine_free_fn,
+            dispatch_trigger_timer: u64::try_from(val.dispatch_trigger_timer_in_s)
+                .ok()
+                .and_then(|v| {
+                    if v == 0 {
+                        None
+                    } else {
+                        Some(tokio::time::Duration::from_secs(v))
+                    }
+                }),
+            queue_trigger_timer: u64::try_from(val.queue_trigger_timer_in_s)
+                .ok()
+                .and_then(|v| {
+                    if v == 0 {
+                        None
+                    } else {
+                        Some(tokio::time::Duration::from_secs(v))
+                    }
+                }),
+            remote_store_addr,
+            use_substitutes: val.use_substitutes,
+            roots_dir,
+            max_retries: val.max_retries,
+            #[allow(clippy::cast_precision_loss)]
+            retry_interval: val.retry_interval as f32,
+            retry_backoff: val.retry_backoff,
+            max_unsupported_time: chrono::Duration::seconds(val.max_unsupported_time_in_s),
+            stop_queue_run_after: if val.stop_queue_run_after_in_s <= 0 {
+                None
+            } else {
+                Some(chrono::Duration::seconds(val.stop_queue_run_after_in_s))
+            },
+            max_concurrent_downloads: val.max_concurrent_downloads,
+            concurrent_upload_limit: val.concurrent_upload_limit,
+        })
+    }
+}
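+
+// Example `config.toml` accepted by `load_config` below (illustrative only;
+// every key is optional and camelCase per the serde rename, and the values
+// shown are assumptions, not recommendations):
+//
+//     hydraDataDir = "/var/lib/hydra"
+//     maxDbConnections = 64
+//     machineSortFn = "BogomipsWithSpeedFactor"
+//     machineFreeFn = "Static"
+//     dispatchTriggerTimerInS = 120
+//     useSubstitutes = true
+//     remoteStoreAddr = ["s3://hydra-cache"]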
+
+/// Loads the config from specified path
+fn load_config(filepath: &str) -> anyhow::Result<PreparedApp> {
+    log::info!("Trying to load file: {filepath}");
+    let toml: AppConfig = if let Ok(content) = std::fs::read_to_string(filepath) {
+        toml::from_str(&content).map_err(|e| anyhow::anyhow!("Failed to load '{filepath}': {e}"))?
+    } else {
+        log::warn!("No config file found! Using default config");
+        toml::from_str("").map_err(|e| anyhow::anyhow!("Failed to parse \"\": {e}"))?
+    };
+    log::info!("Loaded config: {toml:?}");
+
+    toml.try_into()
+        .map_err(|e| anyhow::anyhow!("Failed to prepare configuration: {e}"))
+}
+
+#[derive(Clone)]
+pub struct App {
+    inner: Arc<arc_swap::ArcSwap<PreparedApp>>,
+}
+
+impl App {
+    pub fn init(filepath: &str) -> anyhow::Result<Self> {
+        Ok(Self {
+            inner: Arc::new(arc_swap::ArcSwap::from(Arc::new(load_config(filepath)?))),
+        })
+    }
+
+    fn swap_inner(&self, new_val: PreparedApp) {
+        self.inner.store(Arc::new(new_val));
+    }
+
+    pub fn get_hydra_log_dir(&self) -> std::path::PathBuf {
+        let inner = self.inner.load();
+        inner.hydra_log_dir.clone()
+    }
+
+    pub fn get_lockfile(&self) -> std::path::PathBuf {
+        let inner = self.inner.load();
+        inner.lockfile.clone()
+    }
+
+    pub fn get_db_url(&self) -> secrecy::SecretString {
+        let inner = self.inner.load();
+        inner.db_url.clone()
+    }
+
+    pub fn get_max_db_connections(&self) -> u32 {
+        let inner = self.inner.load();
+        inner.max_db_connections
+    }
+
+    pub fn get_sort_fn(&self) -> MachineSortFn {
+        let inner = self.inner.load();
+        inner.machine_sort_fn
+    }
+
+    pub fn get_free_fn(&self) -> MachineFreeFn {
+        let inner = self.inner.load();
+        inner.machine_free_fn
+    }
+
+    pub fn get_dispatch_trigger_timer(&self) -> Option<tokio::time::Duration> {
+        let inner = self.inner.load();
+        inner.dispatch_trigger_timer
+    }
+
+    pub fn get_queue_trigger_timer(&self) -> Option<tokio::time::Duration> {
+        let inner = self.inner.load();
+        inner.queue_trigger_timer
+    }
+
+    pub fn get_remote_store_addrs(&self) -> Vec<String> {
+        let inner = self.inner.load();
+        inner.remote_store_addr.clone()
+    }
+
+    pub fn get_use_substitutes(&self) -> bool {
+        let inner = self.inner.load();
+        inner.use_substitutes
+    }
+
+    pub fn get_roots_dir(&self) -> std::path::PathBuf {
+        let inner = self.inner.load();
+        inner.roots_dir.clone()
+    }
+
+    pub fn get_retry(&self) -> (u32, f32, f32) {
+        let inner = self.inner.load();
+        (inner.max_retries, inner.retry_interval, inner.retry_backoff)
+    }
+
+    pub fn get_max_unsupported_time(&self) -> chrono::Duration {
+        let inner = self.inner.load();
+        inner.max_unsupported_time
+    }
+
+    pub fn get_stop_queue_run_after(&self) -> Option<chrono::Duration> {
+        let inner = self.inner.load();
+        inner.stop_queue_run_after
+    }
+
+    pub fn get_max_concurrent_downloads(&self) -> u32 {
+        let inner = self.inner.load();
+        inner.max_concurrent_downloads
+    }
+
+    pub fn get_concurrent_upload_limit(&self) -> usize {
+        let inner = self.inner.load();
+        inner.concurrent_upload_limit
+    }
+}
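+
+// Note (added for illustration): `reload` below is driven by the SIGHUP
+// handler installed in main.rs (`spawn_config_reloader`); if loading the file
+// or the state callback fails, the previously active `PreparedApp` stays in
+// place.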
+
+pub fn reload(current_config: &App, filepath: &str, state: &Arc<crate::state::State>) {
+    let new_config = match load_config(filepath) {
+        Ok(c) => c,
+        Err(e) => {
+            log::warn!("Failed to load new config: {e}");
+            let _notify = sd_notify::notify(
+                false,
+                &[
+                    sd_notify::NotifyState::Status("Reload failed"),
+                    sd_notify::NotifyState::Errno(1),
+                ],
+            );
+
+            return;
+        }
+    };
+
+    if let Err(e) = state.reload_config_callback(&new_config) {
+        log::error!("Config reload failed with {e}");
+        let _notify = sd_notify::notify(
+            false,
+            &[
+                sd_notify::NotifyState::Status("Configuration reload failed - Running"),
+                sd_notify::NotifyState::Errno(1),
+            ],
+        );
+        return;
+    }
+
+    current_config.swap_inner(new_config);
+    let _notify = sd_notify::notify(
+        false,
+        &[
+            sd_notify::NotifyState::Status("Configuration reloaded - Running"),
+            sd_notify::NotifyState::Ready,
+        ],
+    );
+}
diff --git a/src/hydra-queue-runner-v2/queue-runner/src/io/mod.rs b/src/hydra-queue-runner-v2/queue-runner/src/io/mod.rs
new file mode 100644
index 000000000..ac607ba9e
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/src/io/mod.rs
@@ -0,0 +1,907 @@
+use std::sync::{Arc, atomic::Ordering};
+
+use ahash::AHashMap;
+use anyhow::Context as _;
+
+use db::models::BuildID;
+use nix_utils::BaseStore as _;
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Empty {}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Error {
+    pub error: String,
+}
+
+#[derive(Debug, serde::Deserialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BuildPayload {
+    pub drv: String,
+    pub jobset_id: i32,
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Pressure {
+    avg10: f32,
+    avg60: f32,
+    avg300: f32,
+    total: u64,
+}
+
+impl Pressure {
+    pub fn new(item: Option<&crate::state::Pressure>) -> Option<Self> {
+        item.map(|v| Self {
+            avg10: v.avg10,
+            avg60: v.avg60,
+            avg300: v.avg300,
+            total: v.total,
+        })
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct PressureState {
+    cpu_some: Option<Pressure>,
+    mem_some: Option<Pressure>,
+    mem_full: Option<Pressure>,
+    io_some: Option<Pressure>,
+    io_full: Option<Pressure>,
+    irq_full: Option<Pressure>,
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct MachineStats {
+    current_jobs: u64,
+    nr_steps_done: u64,
+    avg_step_time_ms: u64,
+    avg_step_import_time_ms: u64,
+    avg_step_build_time_ms: u64,
+    total_step_time_ms: u64,
+    total_step_import_time_ms: u64,
+    total_step_build_time_ms: u64,
+    idle_since: i64,
+
+    last_failure: i64,
+    disabled_until: i64,
+    consecutive_failures: u64,
+    last_ping: i64,
+    since_last_ping: i64,
+
+    load1: f32,
+    load5: f32,
+    load15: f32,
+    mem_usage: u64,
+    pressure: Option<PressureState>,
+    tmp_free_percent: f64,
+    store_free_percent: f64,
+
+    jobs_in_last_30s_start: i64,
+    jobs_in_last_30s_count: u64,
+}
+
+impl MachineStats {
+    fn from(item: &std::sync::Arc<crate::state::MachineStats>, now: i64) -> Self {
+        let last_ping = item.get_last_ping();
+
+        let nr_steps_done = item.get_nr_steps_done();
+        let total_step_time_ms = item.get_total_step_time_ms();
+        let total_step_import_time_ms = item.get_total_step_import_time_ms();
+        let total_step_build_time_ms = item.get_total_step_build_time_ms();
+        let (avg_step_time_ms, avg_step_import_time_ms, avg_step_build_time_ms) =
+            if nr_steps_done > 0 {
+                (
+                    total_step_time_ms / nr_steps_done,
+                    total_step_import_time_ms / nr_steps_done,
+                    total_step_build_time_ms / nr_steps_done,
+                )
+            } else {
+                (0, 0, 0)
+            };
+
+        Self {
+            current_jobs: item.get_current_jobs(),
+            nr_steps_done,
+            avg_step_time_ms,
+            avg_step_import_time_ms,
+            avg_step_build_time_ms,
+            total_step_time_ms,
+            total_step_import_time_ms,
+            total_step_build_time_ms,
+            idle_since: item.get_idle_since(),
+            last_failure: item.get_last_failure(),
+            disabled_until: item.get_disabled_until(),
+            consecutive_failures: item.get_consecutive_failures(),
+            last_ping,
+            since_last_ping: now - last_ping,
+            load1: item.get_load1(),
+            load5: item.get_load5(),
+            load15: item.get_load15(),
+            mem_usage: item.get_mem_usage(),
+            pressure: item.pressure.load().as_ref().map(|p| PressureState {
+                cpu_some: Pressure::new(p.cpu_some.as_ref()),
+                mem_some: Pressure::new(p.mem_some.as_ref()),
+                mem_full: Pressure::new(p.mem_full.as_ref()),
+                io_some: Pressure::new(p.io_some.as_ref()),
+                io_full: Pressure::new(p.io_full.as_ref()),
+                irq_full: Pressure::new(p.irq_full.as_ref()),
+            }),
+            tmp_free_percent: item.get_tmp_free_percent(),
+            store_free_percent: item.get_store_free_percent(),
+            jobs_in_last_30s_start: item.jobs_in_last_30s_start.load(Ordering::Relaxed),
+            jobs_in_last_30s_count: item.jobs_in_last_30s_count.load(Ordering::Relaxed),
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+#[allow(clippy::struct_excessive_bools)]
+pub struct Machine {
+    systems: Vec<String>,
+    hostname: String,
+    uptime: f64,
+    cpu_count: u32,
+    bogomips: f32,
+    speed_factor: f32,
+    max_jobs: u32,
+    tmp_avail_threshold: f64,
+    store_avail_threshold: f64,
+    load1_threshold: f32,
+    cpu_psi_threshold: f32,
+    mem_psi_threshold: f32,
+    io_psi_threshold: Option<f32>,
+    score: f32,
+    total_mem: u64,
+    supported_features: Vec<String>,
+    mandatory_features: Vec<String>,
+    cgroups: bool,
+    stats: MachineStats,
+    jobs: Vec<nix_utils::StorePath>,
+
+    has_capacity: bool,
+    has_dynamic_capacity: bool,
+    has_static_capacity: bool,
+}
+
+impl Machine {
+    pub fn from_state(
+        item: &Arc<crate::state::Machine>,
+        sort_fn: crate::config::MachineSortFn,
+        free_fn: crate::config::MachineFreeFn,
+    ) -> Self {
+        let jobs = { item.jobs.read().iter().map(|j| j.path.clone()).collect() };
+        let time = chrono::Utc::now();
+        Self {
+            systems: item.systems.clone(),
+            uptime: (time - item.joined_at).as_seconds_f64(),
+            hostname: item.hostname.clone(),
+            cpu_count: item.cpu_count,
+            bogomips: item.bogomips,
+            speed_factor: item.speed_factor,
+            max_jobs: item.max_jobs,
+            tmp_avail_threshold: item.tmp_avail_threshold,
+            store_avail_threshold: item.store_avail_threshold,
+            load1_threshold: item.load1_threshold,
+            cpu_psi_threshold: item.cpu_psi_threshold,
+            mem_psi_threshold: item.mem_psi_threshold,
+            io_psi_threshold: item.io_psi_threshold,
+            score: item.score(sort_fn),
+            total_mem: item.total_mem,
+            supported_features: item.supported_features.clone(),
+            mandatory_features: item.mandatory_features.clone(),
+            cgroups: item.cgroups,
+            stats: MachineStats::from(&item.stats, time.timestamp()),
+            jobs,
+            has_capacity: item.has_capacity(free_fn),
+            has_dynamic_capacity: item.has_dynamic_capacity(),
+            has_static_capacity: item.has_static_capacity(),
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BuildQueueStats {
+    active_runnable: u64,
+    total_runnable: u64,
+    nr_runnable_waiting: u64,
+    nr_runnable_disabled: u64,
+    avg_runnable_time: u64,
+    wait_time_ms: u64,
+}
+
+impl From<crate::state::BuildQueueStats> for BuildQueueStats {
+    fn from(v: crate::state::BuildQueueStats) -> Self {
+        Self {
+            active_runnable: v.active_runnable,
+            total_runnable: v.total_runnable,
+            nr_runnable_waiting: v.nr_runnable_waiting,
+            nr_runnable_disabled: v.nr_runnable_disabled,
+            avg_runnable_time: v.avg_runnable_time,
+            wait_time_ms: v.wait_time,
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+#[allow(clippy::struct_field_names)]
+pub struct MemoryStats {
+    current_bytes: u64,
+    peak_bytes: u64,
+    swap_current_bytes: u64,
+    zswap_current_bytes: u64,
+}
+
+impl MemoryStats {
+    fn new(cgroups_path: &std::path::Path) -> anyhow::Result<Self> {
+        Ok(Self {
+            current_bytes: std::fs::read_to_string(cgroups_path.join("memory.current"))?
+                .trim()
+                .parse()
+                .context("memory current parsing failed")?,
+            peak_bytes: std::fs::read_to_string(cgroups_path.join("memory.peak"))?
+                .trim()
+                .parse()
+                .context("memory peak parsing failed")?,
+            swap_current_bytes: std::fs::read_to_string(cgroups_path.join("memory.swap.current"))?
+                .trim()
+                .parse()
+                .context("swap parsing failed")?,
+            zswap_current_bytes: std::fs::read_to_string(
+                cgroups_path.join("memory.zswap.current"),
+            )?
+            .trim()
+            .parse()
+            .context("zswap parsing failed")?,
+        })
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct IoStats {
+    total_read_bytes: u64,
+    total_write_bytes: u64,
+}
+
+impl IoStats {
+    fn new(cgroups_path: &std::path::Path) -> anyhow::Result<Self> {
+        let mut total_read_bytes: u64 = 0;
+        let mut total_write_bytes: u64 = 0;
+
+        let contents = std::fs::read_to_string(cgroups_path.join("io.stat"))?;
+        for line in contents.lines() {
+            for part in line.split_whitespace() {
+                if part.starts_with("rbytes=") {
+                    total_read_bytes += part
+                        .split('=')
+                        .nth(1)
+                        .and_then(|v| v.trim().parse().ok())
+                        .unwrap_or(0);
+                } else if part.starts_with("wbytes=") {
+                    total_write_bytes += part
+                        .split('=')
+                        .nth(1)
+                        .and_then(|v| v.trim().parse().ok())
+                        .unwrap_or(0);
+                }
+            }
+        }
+
+        Ok(Self {
+            total_read_bytes,
+            total_write_bytes,
+        })
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+#[allow(clippy::struct_field_names)]
+pub struct CpuStats {
+    usage_usec: u128,
+    user_usec: u128,
+    system_usec: u128,
+}
+
+impl CpuStats {
+    fn new(cgroups_path: &std::path::Path) -> anyhow::Result<Self> {
+        let contents = std::fs::read_to_string(cgroups_path.join("cpu.stat"))?;
+
+        let mut usage_usec: u128 = 0;
+        let mut user_usec: u128 = 0;
+        let mut system_usec: u128 = 0;
+
+        for line in contents.lines() {
+            if line.starts_with("usage_usec") {
+                usage_usec = line
+                    .split_whitespace()
+                    .nth(1)
+                    .and_then(|v| v.trim().parse().ok())
+                    .unwrap_or(0);
+            } else if line.starts_with("user_usec") {
+                user_usec = line
+                    .split_whitespace()
+                    .nth(1)
+                    .and_then(|v| v.trim().parse().ok())
+                    .unwrap_or(0);
+            } else if line.starts_with("system_usec") {
+                system_usec = line
+                    .split_whitespace()
+                    .nth(1)
+                    .and_then(|v| v.trim().parse().ok())
+                    .unwrap_or(0);
+            }
+        }
+        Ok(Self {
+            usage_usec,
+            user_usec,
+            system_usec,
+        })
+    }
+}
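+
+// For reference: cgroup-v2 `io.stat` has one line per device, e.g.
+// `254:0 rbytes=1459200 wbytes=314773504 rios=192 wios=1021 dbytes=0 dios=0`,
+// and `cpu.stat` exposes `usage_usec`/`user_usec`/`system_usec` lines; the
+// parsers above simply sum or pick out those fields.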
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct CgroupStats {
+    memory: MemoryStats,
+    io: IoStats,
+    cpu: CpuStats,
+}
+
+impl CgroupStats {
+    fn new(me: &procfs::process::Process) -> anyhow::Result<Self> {
+        let cgroups_pathname = format!(
+            "/sys/fs/cgroup/{}",
+            me.cgroups()?
+                .0
+                .first()
+                .ok_or(anyhow::anyhow!("cgroup information is missing in process."))?
+                .pathname
+        );
+        let cgroups_path = std::path::Path::new(&cgroups_pathname);
+        if !cgroups_path.exists() {
+            return Err(anyhow::anyhow!("cgroups directory does not exist."));
+        }
+
+        Ok(Self {
+            memory: MemoryStats::new(cgroups_path)?,
+            io: IoStats::new(cgroups_path)?,
+            cpu: CpuStats::new(cgroups_path)?,
+        })
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Process {
+    pid: i32,
+    vsize_bytes: u64,
+    rss_bytes: u64,
+    shared_bytes: u64,
+    cgroup: Option<CgroupStats>,
+}
+
+impl Process {
+    fn new() -> Option<Self> {
+        let me = procfs::process::Process::myself().ok()?;
+        let page_size = procfs::page_size();
+        let statm = me.statm().ok()?;
+        let vsize = statm.size * page_size;
+        let rss = statm.resident * page_size;
+        let shared = statm.shared * page_size;
+        Some(Self {
+            pid: me.pid,
+            vsize_bytes: vsize,
+            rss_bytes: rss,
+            shared_bytes: shared,
+            cgroup: match CgroupStats::new(&me) {
+                Ok(v) => Some(v),
+                Err(e) => {
+                    log::error!("failed to get cgroup stats: {e}");
+                    None
+                }
+            },
+        })
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StoreStats {
+    nar_info_read: u64,
+    nar_info_read_averted: u64,
+    nar_info_missing: u64,
+    nar_info_write: u64,
+    path_info_cache_size: u64,
+    nar_read: u64,
+    nar_read_bytes: u64,
+    nar_read_compressed_bytes: u64,
+    nar_write: u64,
+    nar_write_averted: u64,
+    nar_write_bytes: u64,
+    nar_write_compressed_bytes: u64,
+    nar_write_compression_time_ms: u64,
+    nar_compression_savings: f64,
+    nar_compression_speed: f64,
+}
+
+impl StoreStats {
+    fn new(v: &nix_utils::StoreStats) -> Self {
+        #[allow(clippy::cast_precision_loss)]
+        Self {
+            nar_info_read: v.nar_info_read,
+            nar_info_read_averted: v.nar_info_read_averted,
+            nar_info_missing: v.nar_info_missing,
+            nar_info_write: v.nar_info_write,
+            path_info_cache_size: v.path_info_cache_size,
+            nar_read: v.nar_read,
+            nar_read_bytes: v.nar_read_bytes,
+            nar_read_compressed_bytes: v.nar_read_compressed_bytes,
+            nar_write: v.nar_write,
+            nar_write_averted: v.nar_write_averted,
+            nar_write_bytes: v.nar_write_bytes,
+            nar_write_compressed_bytes: v.nar_write_compressed_bytes,
+            nar_write_compression_time_ms: v.nar_write_compression_time_ms,
+            nar_compression_savings: if v.nar_write_bytes > 0 {
+                1.0 - (v.nar_write_compressed_bytes as f64 / v.nar_write_bytes as f64)
+            } else {
+                0.0
+            },
+            nar_compression_speed: if v.nar_write_compression_time_ms > 0 {
+                v.nar_write_bytes as f64 / v.nar_write_compression_time_ms as f64 * 1000.0
+                    / (1024.0 * 1024.0)
+            } else {
+                0.0
+            },
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct S3Stats {
+    put: u64,
+    put_bytes: u64,
+    put_time_ms: u64,
+    put_speed: f64,
+    get: u64,
+    get_bytes: u64,
+    get_time_ms: u64,
+    get_speed: f64,
+    head: u64,
+    cost_dollar_approx: f64,
+}
+
+impl S3Stats {
+    fn new(v: &nix_utils::S3Stats) -> Self {
+        #[allow(clippy::cast_precision_loss)]
+        Self {
+            put: v.put,
+            put_bytes: v.put_bytes,
+            put_time_ms: v.put_time_ms,
+            put_speed: if v.put_time_ms > 0 {
+                v.put_bytes as f64 / v.put_time_ms as f64 * 1000.0 / (1024.0 * 1024.0)
+            } else {
+                0.0
+            },
+            get: v.get,
+            get_bytes: v.get_bytes,
+            get_time_ms: v.get_time_ms,
+            get_speed: if v.get_time_ms > 0 {
+                v.get_bytes as f64 / v.get_time_ms as f64 * 1000.0 / (1024.0 * 1024.0)
+            } else {
+                0.0
+            },
+            head: v.head,
+            cost_dollar_approx: (v.get as f64 + v.head as f64) / 10000.0 * 0.004
+                + v.put as f64 / 1000.0 * 0.005
+                + v.get_bytes as f64 / (1024.0 * 1024.0 * 1024.0) * 0.09,
+        }
+    }
+}
+
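+// The `cost_dollar_approx` above assumes S3-standard-like pricing: $0.004 per
+// 10k GET/HEAD requests, $0.005 per 1k PUTs and $0.09 per GiB downloaded.
+// Worked example: 1M GETs + 100k PUTs + 10 GiB of transfer approximates to
+// 0.40 + 0.50 + 0.90 = $1.80.
+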
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct QueueRunnerStats {
+    status: &'static str,
+    time: chrono::DateTime<chrono::Utc>,
+    uptime: f64,
+    proc: Option<Process>,
+    supported_features: Vec<String>,
+
+    build_count: usize,
+    jobset_count: usize,
+    step_count: usize,
+    runnable_count: usize,
+    queue_stats: AHashMap<String, BuildQueueStats>,
+
+    queue_checks_started: u64,
+    queue_build_loads: u64,
+    queue_steps_created: u64,
+    queue_checks_early_exits: u64,
+    queue_checks_finished: u64,
+
+    dispatcher_time_spent_running: u64,
+    dispatcher_time_spent_waiting: u64,
+
+    queue_monitor_time_spent_running: u64,
+    queue_monitor_time_spent_waiting: u64,
+
+    nr_builds_read: i64,
+    build_read_time_ms: i64,
+    nr_builds_unfinished: i64,
+    nr_builds_done: i64,
+    nr_steps_started: i64,
+    nr_steps_done: i64,
+    nr_steps_building: i64,
+    nr_steps_waiting: i64,
+    nr_steps_runnable: i64,
+    nr_steps_unfinished: i64,
+    nr_unsupported_steps: i64,
+    nr_unsupported_steps_aborted: i64,
+    nr_substitutes_started: i64,
+    nr_substitutes_failed: i64,
+    nr_substitutes_succeeded: i64,
+    nr_retries: i64,
+    max_nr_retries: i64,
+    avg_step_time_ms: i64,
+    avg_step_import_time_ms: i64,
+    avg_step_build_time_ms: i64,
+    total_step_time_ms: i64,
+    total_step_import_time_ms: i64,
+    total_step_build_time_ms: i64,
+    nr_queue_wakeups: i64,
+    nr_dispatcher_wakeups: i64,
+    dispatch_time_ms: i64,
+    machines_total: i64,
+    machines_in_use: i64,
+}
+
+impl QueueRunnerStats {
+    pub async fn new(state: Arc<crate::state::State>) -> Self {
+        let build_count = state.get_nr_builds_unfinished();
+        let jobset_count = { state.jobsets.read().len() };
+        let step_count = state.get_nr_steps_unfinished();
+        let runnable_count = state.get_nr_runnable();
+        let queue_stats = {
+            let queues = state.queues.read().await;
+            queues
+                .iter()
+                .map(|(system, queue)| (system.clone(), queue.get_stats().into()))
+                .collect()
+        };
+
+        state.metrics.refresh_dynamic_metrics(&state).await;
+
+        let time = chrono::Utc::now();
+        Self {
+            status: "up",
+            time,
+            uptime: (time - state.started_at).as_seconds_f64(),
+            proc: Process::new(),
+            supported_features: state.machines.get_supported_features(),
+            build_count,
+            jobset_count,
+            step_count,
+            runnable_count,
+            queue_stats,
+            queue_checks_started: state.metrics.queue_checks_started.get(),
+            queue_build_loads: state.metrics.queue_build_loads.get(),
+            queue_steps_created: state.metrics.queue_steps_created.get(),
+            queue_checks_early_exits: state.metrics.queue_checks_early_exits.get(),
+            queue_checks_finished: state.metrics.queue_checks_finished.get(),
+
+            dispatcher_time_spent_running: state.metrics.dispatcher_time_spent_running.get(),
+            dispatcher_time_spent_waiting: state.metrics.dispatcher_time_spent_waiting.get(),
+
+            queue_monitor_time_spent_running: state.metrics.queue_monitor_time_spent_running.get(),
+            queue_monitor_time_spent_waiting: state.metrics.queue_monitor_time_spent_waiting.get(),
+
+            nr_builds_read: state.metrics.nr_builds_read.get(),
+            build_read_time_ms: state.metrics.build_read_time_ms.get(),
+            nr_builds_unfinished: state.metrics.nr_builds_unfinished.get(),
+            nr_builds_done: state.metrics.nr_builds_done.get(),
+            nr_steps_started: state.metrics.nr_steps_started.get(),
+            nr_steps_done: state.metrics.nr_steps_done.get(),
+            nr_steps_building: state.metrics.nr_steps_building.get(),
+            nr_steps_waiting: state.metrics.nr_steps_waiting.get(),
+            nr_steps_runnable: state.metrics.nr_steps_runnable.get(),
+            nr_steps_unfinished: state.metrics.nr_steps_unfinished.get(),
+            nr_unsupported_steps: state.metrics.nr_unsupported_steps.get(),
+            nr_unsupported_steps_aborted: state.metrics.nr_unsupported_steps_aborted.get(),
+            nr_substitutes_started: state.metrics.nr_substitutes_started.get(),
+            nr_substitutes_failed: state.metrics.nr_substitutes_failed.get(),
+            nr_substitutes_succeeded: state.metrics.nr_substitutes_succeeded.get(),
+            nr_retries: state.metrics.nr_retries.get(),
+            max_nr_retries: state.metrics.max_nr_retries.get(),
+            avg_step_time_ms: state.metrics.avg_step_time_ms.get(),
+            avg_step_import_time_ms: state.metrics.avg_step_import_time_ms.get(),
+            avg_step_build_time_ms: state.metrics.avg_step_build_time_ms.get(),
+            total_step_time_ms: state.metrics.total_step_time_ms.get(),
+            total_step_import_time_ms: state.metrics.total_step_import_time_ms.get(),
+            total_step_build_time_ms: state.metrics.total_step_build_time_ms.get(),
+            nr_queue_wakeups: state.metrics.nr_queue_wakeups.get(),
+            nr_dispatcher_wakeups: state.metrics.nr_dispatcher_wakeups.get(),
+            dispatch_time_ms: state.metrics.dispatch_time_ms.get(),
+            machines_total: state.metrics.machines_total.get(),
+            machines_in_use: state.metrics.machines_in_use.get(),
+        }
+    }
+}
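+
+// With `rename_all = "camelCase"`, an abridged `/status` payload serialized
+// from the struct above looks roughly like (placeholder values):
+//
+//     {"status":"up","uptime":12.3,"buildCount":42,"nrStepsRunnable":7,...}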
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct DumpResponse {
+    queue_runner: QueueRunnerStats,
+    machines: AHashMap<String, Machine>,
+    jobsets: AHashMap<String, Jobset>,
+    store: AHashMap<String, StoreStats>,
+    s3: AHashMap<String, S3Stats>,
+}
+
+impl DumpResponse {
+    pub fn new(
+        queue_runner: QueueRunnerStats,
+        machines: AHashMap<String, Machine>,
+        jobsets: AHashMap<String, Jobset>,
+        local_store: &nix_utils::LocalStore,
+        remote_stores: &[nix_utils::RemoteStore],
+    ) -> Self {
+        let mut store_stats = remote_stores
+            .iter()
+            .filter_map(|s| {
+                Some((
+                    s.base_uri.clone(),
+                    StoreStats::new(&s.get_store_stats().ok()?),
+                ))
+            })
+            .collect::<AHashMap<_, _>>();
+        if let Ok(s) = local_store.get_store_stats() {
+            store_stats.insert("local".into(), StoreStats::new(&s));
+        }
+
+        Self {
+            queue_runner,
+            machines,
+            jobsets,
+            store: store_stats,
+            s3: remote_stores
+                .iter()
+                .filter_map(|s| Some((s.base_uri.clone(), S3Stats::new(&s.get_s3_stats().ok()?))))
+                .collect(),
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct MachinesResponse {
+    machines: AHashMap<String, Machine>,
+    machines_count: usize,
+}
+
+impl MachinesResponse {
+    pub fn new(machines: AHashMap<String, Machine>) -> Self {
+        Self {
+            machines_count: machines.len(),
+            machines,
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Jobset {
+    id: crate::state::JobsetID,
+    project_name: String,
+    name: String,
+
+    seconds: i64,
+    shares: u32,
+}
+
+impl From<std::sync::Arc<crate::state::Jobset>> for Jobset {
+    fn from(item: std::sync::Arc<crate::state::Jobset>) -> Self {
+        Self {
+            id: item.id,
+            project_name: item.project_name.clone(),
+            name: item.name.clone(),
+            seconds: item.get_seconds(),
+            shares: item.get_shares(),
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct JobsetsResponse {
+    jobsets: AHashMap<String, Jobset>,
+    jobset_count: usize,
+}
+
+impl JobsetsResponse {
+    pub fn new(jobsets: AHashMap<String, Jobset>) -> Self {
+        Self {
+            jobset_count: jobsets.len(),
+            jobsets,
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct Build {
+    id: BuildID,
+    drv_path: nix_utils::StorePath,
+    jobset_id: crate::state::JobsetID,
+    name: String,
+    timestamp: chrono::DateTime<chrono::Utc>,
+    max_silent_time: i32,
+    timeout: i32,
+    local_priority: i32,
+    global_priority: i32,
+    finished_in_db: bool,
+}
+
+impl From<std::sync::Arc<crate::state::Build>> for Build {
+    fn from(item: std::sync::Arc<crate::state::Build>) -> Self {
+        Self {
+            id: item.id,
+            drv_path: item.drv_path.clone(),
+            jobset_id: item.jobset_id,
+            name: item.name.clone(),
+            timestamp: item.timestamp,
+            max_silent_time: item.max_silent_time,
+            timeout: item.timeout,
+            local_priority: item.local_priority,
+            global_priority: item.global_priority.load(Ordering::Relaxed),
+            finished_in_db: item.get_finished_in_db(),
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct BuildsResponse {
+    builds: Vec<Build>,
+    build_count: usize,
+}
+
+impl BuildsResponse {
+    pub fn new(builds: Vec<Build>) -> Self {
+        Self {
+            build_count: builds.len(),
+            builds,
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+#[allow(clippy::struct_excessive_bools)]
+pub struct Step {
+    drv_path: nix_utils::StorePath,
+    runnable: bool,
+    finished: bool,
+    previous_failure: bool,
+
+    created: bool,
+    tries: u32,
+    highest_global_priority: i32,
+    highest_local_priority: i32,
+
+    lowest_build_id: BuildID,
+    deps_count: usize,
+}
+
+impl From<std::sync::Arc<crate::state::Step>> for Step {
+    fn from(item: std::sync::Arc<crate::state::Step>) -> Self {
+        Self {
+            drv_path: item.get_drv_path().clone(),
+            runnable: item.get_runnable(),
+            finished: item.get_finished(),
+            previous_failure: item.get_previous_failure(),
+            created: item.atomic_state.get_created(),
+            tries: item.atomic_state.tries.load(Ordering::Relaxed),
+            highest_global_priority: item
+                .atomic_state
+                .highest_global_priority
+                .load(Ordering::Relaxed),
+            highest_local_priority: item
+                .atomic_state
+                .highest_local_priority
+                .load(Ordering::Relaxed),
+            lowest_build_id: item.atomic_state.lowest_build_id.load(Ordering::Relaxed),
+            deps_count: item.get_deps_size(),
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StepsResponse {
+    steps: Vec<Step>,
+    step_count: usize,
+}
+
+impl StepsResponse {
+    pub fn new(steps: Vec<Step>) -> Self {
+        Self {
+            step_count: steps.len(),
+            steps,
+        }
+    }
+}
+
+#[allow(clippy::struct_excessive_bools)]
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StepInfo {
+    drv_path: nix_utils::StorePath,
+    already_scheduled: bool,
+    runnable: bool,
+    finished: bool,
+    cancelled: bool,
+    runnable_since: chrono::DateTime<chrono::Utc>,
+
+    tries: u32,
+
+    lowest_share_used: f64,
+    highest_global_priority: i32,
+    highest_local_priority: i32,
+    lowest_build_id: BuildID,
+}
+
+impl From<std::sync::Arc<crate::state::StepInfo>> for StepInfo {
+    fn from(item: std::sync::Arc<crate::state::StepInfo>) -> Self {
+        Self {
+            drv_path: item.step.get_drv_path().clone(),
+            already_scheduled: item.get_already_scheduled(),
+            runnable: item.step.get_runnable(),
+            finished: item.step.get_finished(),
+            cancelled: item.get_cancelled(),
+            runnable_since: item.runnable_since,
+            tries: item.step.atomic_state.tries.load(Ordering::Relaxed),
+            lowest_share_used: item.lowest_share_used,
+            highest_global_priority: item.highest_global_priority,
+            highest_local_priority: item.highest_local_priority,
+            lowest_build_id: item.lowest_build_id,
+        }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct QueueResponse {
+    queues: AHashMap<String, Vec<StepInfo>>,
+}
+
+impl QueueResponse {
+    pub fn new(queues: AHashMap<String, Vec<StepInfo>>) -> Self {
+        Self { queues }
+    }
+}
+
+#[derive(Debug, serde::Serialize)]
+#[serde(rename_all = "camelCase")]
+pub struct StepInfoResponse {
+    steps: Vec<StepInfo>,
+    step_count: usize,
+}
+
+impl StepInfoResponse {
+    pub fn new(steps: Vec<StepInfo>) -> Self {
+        Self {
+            step_count: steps.len(),
+            steps,
+        }
+    }
+}
diff --git a/src/hydra-queue-runner-v2/queue-runner/src/main.rs b/src/hydra-queue-runner-v2/queue-runner/src/main.rs
new file mode 100644
index 000000000..e20f18ae7
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/src/main.rs
@@ -0,0 +1,135 @@
+#![deny(clippy::all)]
+#![deny(clippy::pedantic)]
+
+use state::State;
+
+mod config;
+mod io;
+mod server;
+mod state;
+mod utils;
+
+#[cfg(not(target_env = "msvc"))]
+#[global_allocator]
+static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
+
+fn start_task_loops(state: std::sync::Arc<State>) -> Vec<tokio::task::AbortHandle> {
+    log::info!("QueueRunner starting task loops");
+
+    vec![
+        spawn_config_reloader(state.clone(), state.config.clone(), &state.args.config_path),
+        state.clone().start_queue_monitor_loop(),
+        state.clone().start_dispatch_loop(),
+        state.clone().start_dump_status_loop(),
+        state.start_uploader_queue(),
+    ]
+}
+
+fn spawn_config_reloader(
+    state: std::sync::Arc<State>,
+    current_config: config::App,
+    filepath: &str,
+) -> tokio::task::AbortHandle {
+    let filepath = filepath.to_owned();
+    let task = tokio::spawn(async move {
+        loop {
+            tokio::signal::unix::signal(tokio::signal::unix::SignalKind::hangup())
+                .unwrap()
+                .recv()
+                .await
+                .unwrap();
+            log::info!("Reloading...");
+            config::reload(&current_config, &filepath, &state);
+        }
+    });
+    task.abort_handle()
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let reload_handle = config::init_tracing()?;
+
+    let state = State::new(reload_handle).await?;
+    if state.args.status {
+        state.get_status_from_main_process().await?;
+        return Ok(());
+    }
+
+    if !state.args.mtls_configured_correctly() {
+        log::error!(
+            "mtls configured improperly, please pass all options: server_cert_path, server_key_path and client_ca_cert_path!"
+        );
+        return Err(anyhow::anyhow!("Configuration issue"));
+    }
+
+    let lockfile_path = state.config.get_lockfile();
+    let _lock = lockfile::Lockfile::create_with_parents(&lockfile_path).map_err(|e| {
+        anyhow::anyhow!(
+            "Another instance is already running. Path={} Internal Error: {e}",
+            lockfile_path.display()
+        )
+    })?;
+
+    let task_abort_handles = start_task_loops(state.clone());
+    log::info!(
+        "QueueRunner listening on grpc: {:?} and rest: {}",
+        state.args.grpc_bind,
+        state.args.rest_bind
+    );
+    let srv1 = server::grpc::Server::run(state.args.grpc_bind.clone(), state.clone());
+    let srv2 = server::http::Server::run(state.args.rest_bind, state.clone());
+
+    let task = tokio::spawn(async move {
+        match futures_util::future::join(srv1, srv2).await {
+            (Ok(()), Ok(())) => Ok(()),
+            (Ok(()), Err(e)) => Err(anyhow::anyhow!("hyper error while awaiting handle: {e}")),
+            (Err(e), Ok(())) => Err(anyhow::anyhow!("tonic error while awaiting handle: {e}")),
+            (Err(e1), Err(e2)) => Err(anyhow::anyhow!(
+                "tonic and hyper error while awaiting handle: {e1} | {e2}"
+            )),
+        }
+    });
+
+    let _notify = sd_notify::notify(
+        false,
+        &[
+            sd_notify::NotifyState::Status("Running"),
+            sd_notify::NotifyState::Ready,
+        ],
+    );
+
+    let mut sigint = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::interrupt())?;
+    let mut sigterm = tokio::signal::unix::signal(tokio::signal::unix::SignalKind::terminate())?;
+
+    let abort_handle = task.abort_handle();
+    tokio::select! {
+        _ = sigint.recv() => {
+            log::info!("Received sigint - shutting down gracefully");
+            let _ = sd_notify::notify(false, &[sd_notify::NotifyState::Stopping]);
+            abort_handle.abort();
+            for h in task_abort_handles {
+                h.abort();
+            }
+            // removing all machines will also mark all currently running jobs as cancelled
+            state.remove_all_machines().await;
+            let _ = state.clear_busy().await;
+            Ok(())
+        }
+        _ = sigterm.recv() => {
+            log::info!("Received sigterm - shutting down gracefully");
+            let _ = sd_notify::notify(false, &[sd_notify::NotifyState::Stopping]);
+            abort_handle.abort();
+            for h in task_abort_handles {
+                h.abort();
+            }
+            // removing all machines will also mark all currently running jobs as cancelled
+            state.remove_all_machines().await;
+            let _ = state.clear_busy().await;
+            Ok(())
+        }
+        r = task => {
+            r??;
+            Ok(())
+        }
+    }
+}
diff --git a/src/hydra-queue-runner-v2/queue-runner/src/server/grpc.rs b/src/hydra-queue-runner-v2/queue-runner/src/server/grpc.rs
new file mode 100644
index 000000000..c5870a3ec
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/src/server/grpc.rs
@@ -0,0 +1,473 @@
+use std::sync::Arc;
+
+use anyhow::Context as _;
+use tokio::{io::AsyncWriteExt as _, sync::mpsc};
+use tracing::Instrument as _;
+
+use crate::{
+    config::BindSocket,
+    server::grpc::runner_v1::{BuildResultState, StepUpdate},
+    state::{Machine, MachineMessage, State},
+};
+use nix_utils::BaseStore as _;
+use runner_v1::{
+    BuildResultInfo, BuilderRequest, FetchRequisitesRequest, JoinResponse, LogChunk, NarData,
+    RunnerRequest, SimplePingMessage, StorePath, StorePaths, builder_request,
+    runner_service_server::{RunnerService, RunnerServiceServer},
+};
+
+type BuilderResult<T> = Result<tonic::Response<T>, tonic::Status>;
+type OpenTunnelResponseStream =
+    std::pin::Pin<Box<dyn tokio_stream::Stream<Item = Result<RunnerRequest, tonic::Status>> + Send>>;
+type StreamFileResponseStream =
+    std::pin::Pin<Box<dyn tokio_stream::Stream<Item = Result<NarData, tonic::Status>> + Send>>;
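+
+// Rough shape of the tunnel protocol implemented below (illustrative only):
+//
+//     builder                            queue-runner
+//     -- BuilderRequest{Join} --------->  open_tunnel registers the machine
+//     <-- RunnerRequest{JoinResponse} --  assigns a machine id
+//     -- BuilderRequest{Ping} --------->  periodic load/pressure stats
+//     <-- RunnerRequest{Build/Abort} ---  work is pushed over the same stream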
+
+// There is no reason to make this configurable; it only exists to ensure the
+// channel is not closed. We don't use this to send any actual information.
+const BACKWARDS_PING_INTERVAL: u64 = 30;
+
+pub mod runner_v1 {
+    // We need to allow pedantic here because of generated code
+    #![allow(clippy::pedantic)]
+
+    tonic::include_proto!("runner.v1");
+
+    pub(crate) const FILE_DESCRIPTOR_SET: &[u8] =
+        tonic::include_file_descriptor_set!("streaming_descriptor");
+
+    impl From<StepStatus> for db::models::StepStatus {
+        fn from(item: StepStatus) -> Self {
+            match item {
+                StepStatus::Preparing => Self::Preparing,
+                StepStatus::Connecting => Self::Connecting,
+                StepStatus::SeningInputs => Self::SendingInputs,
+                StepStatus::Building => Self::Building,
+                StepStatus::WaitingForLocalSlot => Self::WaitingForLocalSlot,
+                StepStatus::ReceivingOutputs => Self::ReceivingOutputs,
+                StepStatus::PostProcessing => Self::PostProcessing,
+            }
+        }
+    }
+}
+
+fn match_for_io_error(err_status: &tonic::Status) -> Option<&std::io::Error> {
+    let mut err: &(dyn std::error::Error + 'static) = err_status;
+
+    loop {
+        if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
+            return Some(io_err);
+        }
+
+        // h2::Error does not expose std::io::Error with `source()`
+        // https://github.com/hyperium/h2/pull/462
+        if let Some(h2_err) = err.downcast_ref::<h2::Error>() {
+            if let Some(io_err) = h2_err.get_io() {
+                return Some(io_err);
+            }
+        }
+
+        err = err.source()?;
+    }
+}
+
+#[tracing::instrument(skip(state, msg))]
+fn handle_message(state: &Arc<State>, msg: builder_request::Message) {
+    match msg {
+        // at this point in time, builder already joined, so this message can be ignored
+        builder_request::Message::Join(_) => (),
+        builder_request::Message::Ping(msg) => {
+            log::debug!("new ping: {msg:?}");
+            let Ok(machine_id) = uuid::Uuid::parse_str(&msg.machine_id) else {
+                return;
+            };
+            if let Some(m) = state.machines.get_machine_by_id(machine_id) {
+                m.stats.store_ping(&msg);
+            }
+        }
+        #[allow(unreachable_patterns)]
+        _ => log::warn!("unhandled message: {msg:?}"),
+    }
+}
+
+pub struct Server {
+    state: Arc<State>,
+}
+
+impl Server {
+    pub async fn run(addr: BindSocket, state: Arc<State>) -> anyhow::Result<()> {
+        let service = RunnerServiceServer::new(Self {
+            state: state.clone(),
+        })
+        .send_compressed(tonic::codec::CompressionEncoding::Zstd)
+        .accept_compressed(tonic::codec::CompressionEncoding::Zstd)
+        .max_decoding_message_size(50 * 1024 * 1024)
+        .max_encoding_message_size(50 * 1024 * 1024);
+
+        let mut server =
+            tonic::transport::Server::builder().trace_fn(|_| tracing::info_span!("grpc_server"));
+
+        if state.args.mtls_enabled() {
+            log::info!("Using mtls");
+            let (client_ca_cert, server_identity) = state
+                .args
+                .get_mtls()
+                .await
+                .context("Failed to get_mtls Certificate and Identity")?;
+
+            let tls = tonic::transport::ServerTlsConfig::new()
+                .identity(server_identity)
+                .client_ca_root(client_ca_cert);
+            server = server.tls_config(tls)?;
+        }
+        let reflection_service = tonic_reflection::server::Builder::configure()
+            .register_encoded_file_descriptor_set(runner_v1::FILE_DESCRIPTOR_SET)
+            .build_v1()?;
+        let server = server.add_service(reflection_service).add_service(service);
+
+        match addr {
+            BindSocket::Tcp(s) => server.serve(s).await?,
+            BindSocket::Unix(p) => {
+                let uds = tokio::net::UnixListener::bind(p)?;
+                let uds_stream = tokio_stream::wrappers::UnixListenerStream::new(uds);
+                server.serve_with_incoming(uds_stream).await?;
+            }
+            BindSocket::ListenFd => {
+                let listener = listenfd::ListenFd::from_env()
+                    .take_unix_listener(0)?
+                    .ok_or(anyhow::anyhow!("No listenfd found in env"))?;
+                listener.set_nonblocking(true)?;
+                let listener = tokio_stream::wrappers::UnixListenerStream::new(
+                    tokio::net::UnixListener::from_std(listener)?,
+                );
+
+                server.serve_with_incoming(listener).await?;
+            }
+        }
+
+        Ok(())
+    }
+}
+ .ok_or(anyhow::anyhow!("No listenfd found in env"))?; + listener.set_nonblocking(true)?; + let listener = tokio_stream::wrappers::UnixListenerStream::new( + tokio::net::UnixListener::from_std(listener)?, + ); + + server.serve_with_incoming(listener).await?; + } + } + + Ok(()) + } +} + +#[tonic::async_trait] +impl RunnerService for Server { + type OpenTunnelStream = OpenTunnelResponseStream; + type StreamFileStream = StreamFileResponseStream; + type StreamFilesStream = StreamFileResponseStream; + + #[tracing::instrument(skip(self, req), err)] + async fn open_tunnel( + &self, + req: tonic::Request>, + ) -> BuilderResult { + use tokio_stream::StreamExt as _; + + let mut stream = req.into_inner(); + let (input_tx, mut input_rx) = mpsc::channel::(128); + let machine = match stream.next().await { + Some(Ok(m)) => match m.message { + Some(runner_v1::builder_request::Message::Join(v)) => { + Machine::new(v, input_tx).ok() + } + _ => None, + }, + _ => None, + }; + let Some(machine) = machine else { + return Err(tonic::Status::invalid_argument("No Ping message was sent")); + }; + + let state = self.state.clone(); + let machine_id = state.insert_machine(machine.clone()).await; + log::info!("Registered new machine: machine_id={machine_id} machine={machine}",); + + let (output_tx, output_rx) = mpsc::channel(128); + if let Err(e) = output_tx + .send(Ok(RunnerRequest { + message: Some(runner_v1::runner_request::Message::Join(JoinResponse { + machine_id: machine_id.to_string(), + max_concurrent_downloads: state.config.get_max_concurrent_downloads(), + })), + })) + .await + { + log::error!("Failed to send join response machine_id={machine_id} e={e}"); + return Err(tonic::Status::internal("Failed to send join Response.")); + } + + let mut ping_interval = + tokio::time::interval(std::time::Duration::from_secs(BACKWARDS_PING_INTERVAL)); + tokio::spawn(async move { + loop { + tokio::select! 
+                    _ = ping_interval.tick() => {
+                        let msg = RunnerRequest {
+                            message: Some(runner_v1::runner_request::Message::Ping(SimplePingMessage {
+                                message: "ping".into(),
+                            }))
+                        };
+                        if let Err(e) = output_tx.send(Ok(msg)).await {
+                            log::error!("Failed to send message to machine={machine_id} e={e}");
+                            state.remove_machine(machine_id).await;
+                            break
+                        }
+                    },
+                    msg = input_rx.recv() => {
+                        if let Some(msg) = msg {
+                            if let Err(e) = output_tx.send(Ok(msg.into_request())).await {
+                                log::error!("Failed to send message to machine={machine_id} e={e}");
+                                state.remove_machine(machine_id).await;
+                                break
+                            }
+                        } else {
+                            state.remove_machine(machine_id).await;
+                            break
+                        }
+                    },
+                    msg = stream.next() => match msg.map(|v| v.map(|v| v.message)) {
+                        Some(Ok(Some(msg))) => handle_message(&state, msg),
+                        Some(Ok(None)) => (), // empty message can be ignored
+                        Some(Err(err)) => {
+                            if let Some(io_err) = match_for_io_error(&err) {
+                                if io_err.kind() == std::io::ErrorKind::BrokenPipe {
+                                    log::error!("client disconnected: broken pipe: machine={machine_id}");
+                                    state.remove_machine(machine_id).await;
+                                    break;
+                                }
+                            }
+
+                            match output_tx.send(Err(err)).await {
+                                Ok(()) => (),
+                                Err(_err) => {
+                                    state.remove_machine(machine_id).await;
+                                    break
+                                }
+                            }
+                        },
+                        None => {
+                            state.remove_machine(machine_id).await;
+                            break
+                        }
+                    }
+                }
+            }
+        });
+
+        Ok(tonic::Response::new(
+            Box::pin(tokio_stream::wrappers::ReceiverStream::new(output_rx))
+                as Self::OpenTunnelStream,
+        ))
+    }
+
+    #[tracing::instrument(skip(self, req), err)]
+    async fn build_log(
+        &self,
+        req: tonic::Request<tonic::Streaming<LogChunk>>,
+    ) -> BuilderResult<runner_v1::Empty> {
+        use tokio_stream::StreamExt as _;
+
+        let mut stream = req.into_inner();
+        let state = self.state.clone();
+
+        let mut out_file: Option<tokio::fs::File> = None;
+        while let Some(chunk) = stream.next().await {
+            let chunk = chunk?;
+
+            if let Some(ref mut file) = out_file {
+                file.write_all(&chunk.data).await?;
+            } else {
+                let mut file = state
+                    .new_log_file(&nix_utils::StorePath::new(&chunk.drv))
+                    .await
+                    .map_err(|_| tonic::Status::internal("Failed to create log file."))?;
+                file.write_all(&chunk.data).await?;
+                out_file = Some(file);
+            }
+        }
+
+        Ok(tonic::Response::new(runner_v1::Empty {}))
+    }
+
+    #[tracing::instrument(skip(self, req), err)]
+    async fn build_result(
+        &self,
+        req: tonic::Request<tonic::Streaming<NarData>>,
+    ) -> BuilderResult<runner_v1::Empty> {
+        let stream = req.into_inner();
+        // TODO: we leak memory if we use the store from state, so we open and close a new
+        // connection for each import. This sucks but using the state.store will result in the path
+        // not being closed!
+        let store = nix_utils::LocalStore::init();
+        store
+            .import_paths(
+                tokio_stream::StreamExt::map(stream, |s| {
+                    s.map(|v| v.chunk.into())
+                        .map_err(|e| std::io::Error::new(std::io::ErrorKind::UnexpectedEof, e))
+                }),
+                false,
+            )
+            .await
+            .map_err(|_| tonic::Status::internal("Failed to import path."))?;
+
+        Ok(tonic::Response::new(runner_v1::Empty {}))
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    async fn build_step_update(
+        &self,
+        req: tonic::Request<StepUpdate>,
+    ) -> BuilderResult<runner_v1::Empty> {
+        let state = self.state.clone();
+
+        let req = req.into_inner();
+        let drv = req.drv.clone();
+        let machine_id = uuid::Uuid::parse_str(&req.machine_id);
+        let step_status = db::models::StepStatus::from(req.step_status());
+
+        tokio::spawn({
+            async move {
+                if let Err(e) = state
+                    .update_build_step(
+                        machine_id.ok(),
+                        &nix_utils::StorePath::new(&drv),
+                        step_status,
+                    )
+                    .await
+                {
+                    log::error!(
+                        "Failed to update build step with drv={drv} step_status={step_status:?}: {e}"
+                    );
+                }
+            }.in_current_span()
+        });
+
+        Ok(tonic::Response::new(runner_v1::Empty {}))
+    }
+
+    #[tracing::instrument(skip(self, req), fields(machine_id=req.get_ref().machine_id, drv=req.get_ref().drv), err)]
+    async fn complete_build(
+        &self,
+        req: tonic::Request<BuildResultInfo>,
+    ) -> BuilderResult<runner_v1::Empty> {
+        let state = self.state.clone();
+
+        let req = req.into_inner();
+        let drv = req.drv.clone();
+        let machine_id = uuid::Uuid::parse_str(&req.machine_id);
+
+        tokio::spawn({
+            async move {
+                if req.result_state() == BuildResultState::Success {
+                    let build_output = crate::state::BuildOutput::from(req);
+                    if let Err(e) = state
+                        .succeed_step(
+                            machine_id.ok(),
+                            &nix_utils::StorePath::new(&drv),
+                            build_output,
+                        )
+                        .await
+                    {
+                        log::error!("Failed to mark step with drv={drv} as done: {e}");
+                    }
+                } else if let Err(e) = state
+                    .fail_step(
+                        machine_id.ok(),
+                        &nix_utils::StorePath::new(&drv),
+                        req.result_state().into(),
+                        std::time::Duration::from_millis(req.import_time_ms),
+                        std::time::Duration::from_millis(req.build_time_ms),
+                    )
+                    .await
+                {
+                    log::error!("Failed to fail step with drv={drv}: {e}");
+                }
+            }
+            .in_current_span()
+        });
+
+        Ok(tonic::Response::new(runner_v1::Empty {}))
+    }
+
+    #[tracing::instrument(skip(self, req), err)]
+    async fn fetch_drv_requisites(
+        &self,
+        req: tonic::Request<FetchRequisitesRequest>,
+    ) -> BuilderResult<runner_v1::DrvRequisitesMessage> {
+        let state = self.state.clone();
+        let req = req.into_inner();
+        let drv = nix_utils::StorePath::new(&req.path);
+
+        let requisites = state
+            .store
+            .query_requisites(vec![drv], req.include_outputs)
+            .await
+            .map_err(|e| {
+                log::error!("failed to toposort drv e={e}");
+                tonic::Status::internal("failed to toposort drv.")
+            })?
+            .into_iter()
+            .map(nix_utils::StorePath::into_base_name)
+            .collect();
+
+        Ok(tonic::Response::new(runner_v1::DrvRequisitesMessage {
+            requisites,
+        }))
+    }
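+
+    // Note (comment added for illustration): the two Stream* RPCs below reply
+    // with NAR export streams; `export_paths` pushes chunks into an unbounded
+    // channel from its export callback, and the receiver is returned as the
+    // gRPC response stream.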
+ .into_iter() + .map(nix_utils::StorePath::into_base_name) + .collect(); + + Ok(tonic::Response::new(runner_v1::DrvRequisitesMessage { + requisites, + })) + } + + #[tracing::instrument(skip(self, req), err)] + async fn stream_file( + &self, + req: tonic::Request, + ) -> BuilderResult { + let state = self.state.clone(); + + let path = nix_utils::StorePath::new(&req.into_inner().path); + let store = state.store.clone(); + let (tx, rx) = tokio::sync::mpsc::unbounded_channel::>(); + + let closure = move |data: &[u8]| { + let data = Vec::from(data); + tx.send(Ok(NarData { chunk: data })).is_ok() + }; + + tokio::task::spawn(async move { + let _ = store.export_paths(&[path], closure); + }); + + Ok(tonic::Response::new( + Box::pin(tokio_stream::wrappers::UnboundedReceiverStream::new(rx)) + as Self::StreamFileStream, + )) + } + + #[tracing::instrument(skip(self, req), err)] + async fn stream_files( + &self, + req: tonic::Request, + ) -> BuilderResult { + let state = self.state.clone(); + + let req = req.into_inner(); + let paths = req + .paths + .into_iter() + .map(|p| nix_utils::StorePath::new(&p)) + .collect::>(); + + let store = state.store.clone(); + let (tx, rx) = tokio::sync::mpsc::unbounded_channel::>(); + + let closure = move |data: &[u8]| { + let data = Vec::from(data); + tx.send(Ok(NarData { chunk: data })).is_ok() + }; + + tokio::task::spawn(async move { + let _ = store.export_paths(&paths, closure.clone()); + }); + + Ok(tonic::Response::new( + Box::pin(tokio_stream::wrappers::UnboundedReceiverStream::new(rx)) + as Self::StreamFilesStream, + )) + } +} diff --git a/src/hydra-queue-runner-v2/queue-runner/src/server/http.rs b/src/hydra-queue-runner-v2/queue-runner/src/server/http.rs new file mode 100644 index 000000000..4ed122043 --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/server/http.rs @@ -0,0 +1,424 @@ +use std::{net::SocketAddr, sync::Arc}; + +use crate::state::State; +use bytes::Bytes; +use http_body_util::{BodyExt, Full, combinators::BoxBody}; +use tracing::Instrument as _; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("uuid error: `{0}`")] + Uuid(#[from] uuid::Error), + + #[error("serde json error: `{0}`")] + SerdeJson(#[from] serde_json::Error), + + #[error("hyper http error: `{0}`")] + HyperHttp(#[from] hyper::http::Error), + + #[error("hyper error: `{0}`")] + Hyper(#[from] hyper::Error), + + #[error("std io error: `{0}`")] + Io(#[from] std::io::Error), + + #[error("anyhow error: `{0}`")] + Anyhow(#[from] anyhow::Error), + + #[error("db error: `{0}`")] + Sqlx(#[from] db::Error), + + #[error("Not found")] + NotFound, + + #[error("Fatal")] + #[allow(dead_code)] + Fatal, +} + +impl Error { + pub fn get_status(&self) -> hyper::StatusCode { + match *self { + Self::Uuid(_) + | Self::SerdeJson(_) + | Self::HyperHttp(_) + | Self::Hyper(_) + | Self::Io(_) + | Self::Anyhow(_) + | Self::Sqlx(_) + | Self::Fatal => hyper::StatusCode::INTERNAL_SERVER_ERROR, + Self::NotFound => hyper::StatusCode::NOT_FOUND, + } + } + + pub fn get_body(&self) -> crate::io::Error { + crate::io::Error { + error: self.to_string(), + } + } +} + +fn full>(chunk: T) -> BoxBody { + Full::new(chunk.into()) + .map_err(|never| match never {}) + .boxed() +} + +fn construct_json_response( + status: hyper::StatusCode, + data: &U, +) -> Result>, Error> { + Ok(hyper::Response::builder() + .status(status) + .header(hyper::header::CONTENT_TYPE, "application/json") + .body(full(serde_json::to_string(data)?))?) 
+} + +fn construct_json_ok_response( + data: &U, +) -> Result>, Error> { + construct_json_response(hyper::StatusCode::OK, data) +} + +pub struct Server {} +impl Server { + pub async fn run(addr: SocketAddr, state: Arc) -> Result<(), Error> { + async move { + let listener = tokio::net::TcpListener::bind(&addr).await?; + let server_span = tracing::span!(tracing::Level::TRACE, "http_server", %addr); + + loop { + let (stream, _) = listener.accept().await?; + let io = hyper_util::rt::TokioIo::new(stream); + + let state = state.clone(); + tokio::task::spawn({ + let server_span = server_span.clone(); + async move { + if let Err(err) = hyper::server::conn::http1::Builder::new() + .serve_connection( + io, + hyper::service::service_fn(move |req| router(req, state.clone())), + ) + .instrument(server_span.clone()) + .await + { + log::error!("Error serving connection: {err:?}"); + } + } + }); + } + } + .await + } +} + +async fn router( + req: hyper::Request, + state: Arc, +) -> Result>, Error> { + let span = tracing::span!( + tracing::Level::INFO, + "request", + method = ?req.method(), + uri = ?req.uri(), + headers = ?req.headers() + ); + async move { + let r = match (req.method(), req.uri().path()) { + (&hyper::Method::GET, "/status") => handler::status::get(req, state).await, + (&hyper::Method::GET, "/status/machines") => { + handler::status::machines(req, state).await + } + (&hyper::Method::GET, "/status/jobsets") => handler::status::jobsets(req, state), + (&hyper::Method::GET, "/status/builds") => handler::status::builds(req, state), + (&hyper::Method::GET, "/status/steps") => handler::status::steps(req, state), + (&hyper::Method::GET, "/status/runnable") => handler::status::runnable(req, state), + (&hyper::Method::GET, "/status/queues") => handler::status::queues(req, state).await, + (&hyper::Method::GET, "/status/queues/jobs") => { + handler::status::queue_jobs(req, state).await + } + (&hyper::Method::GET, "/status/queues/scheduled") => { + handler::status::queue_scheduled(req, state).await + } + (&hyper::Method::POST, "/dump_status") => handler::dump_status::post(req, state).await, + (&hyper::Method::PUT, "/build") => handler::build::put(req, state).await, + (&hyper::Method::GET, "/metrics") => handler::metrics::get(req, state).await, + _ => Err(Error::NotFound), + }; + if let Err(r) = r.as_ref() { + construct_json_response(r.get_status(), &r.get_body()) + } else { + r + } + } + .instrument(span) + .await +} + +mod handler { + pub mod status { + use bytes::Bytes; + use http_body_util::combinators::BoxBody; + + use super::super::{Error, construct_json_ok_response}; + use crate::{io, state::State}; + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub async fn get( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let queue_stats = io::QueueRunnerStats::new(state.clone()).await; + let sort_fn = state.config.get_sort_fn(); + let free_fn = state.config.get_free_fn(); + let machines = state + .machines + .get_all_machines() + .into_iter() + .map(|m| { + ( + m.hostname.clone(), + crate::io::Machine::from_state(&m, sort_fn, free_fn), + ) + }) + .collect(); + let jobsets = { + let jobsets = state.jobsets.read(); + jobsets + .values() + .map(|v| (v.full_name(), v.clone().into())) + .collect() + }; + let remote_stores = { + let stores = state.remote_stores.read(); + stores.clone() + }; + construct_json_ok_response(&io::DumpResponse::new( + queue_stats, + machines, + jobsets, + &state.store, + &remote_stores, + )) + } + + 
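Every handler returns `Result<_, Error>`, and `router` above funnels all failures through one conversion point that pairs `Error::get_status()` with the JSON body from `Error::get_body()`. A compact sketch of that error-funnel shape, with plain `u16`/`String` standing in for hyper's `StatusCode` and `BoxBody`:

```rust
#[allow(dead_code)]
#[derive(Debug)]
enum RouteError {
    NotFound,
    Internal(String),
}

impl RouteError {
    fn status(&self) -> u16 {
        match self {
            RouteError::NotFound => 404,
            RouteError::Internal(_) => 500,
        }
    }
}

fn dispatch(method: &str, path: &str) -> Result<String, RouteError> {
    // Mirrors the (method, path) match in router().
    match (method, path) {
        ("GET", "/status") => Ok(r#"{"ok":true}"#.into()),
        _ => Err(RouteError::NotFound),
    }
}

fn respond(method: &str, path: &str) -> (u16, String) {
    match dispatch(method, path) {
        Ok(body) => (200, body),
        // The single conversion point, as in
        // `construct_json_response(r.get_status(), &r.get_body())`.
        Err(e) => (e.status(), format!(r#"{{"error":"{e:?}"}}"#)),
    }
}
```

Keeping the conversion in one place means a handler can never forget to serialize an error, and a new endpoint only adds one match arm.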
#[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub async fn machines( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let sort_fn = state.config.get_sort_fn(); + let free_fn = state.config.get_free_fn(); + let machines = state + .machines + .get_all_machines() + .into_iter() + .map(|m| { + ( + m.hostname.clone(), + crate::io::Machine::from_state(&m, sort_fn, free_fn), + ) + }) + .collect(); + construct_json_ok_response(&io::MachinesResponse::new(machines)) + } + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub fn jobsets( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let jobsets = { + let jobsets = state.jobsets.read(); + jobsets + .values() + .map(|v| (v.full_name(), v.clone().into())) + .collect() + }; + construct_json_ok_response(&io::JobsetsResponse::new(jobsets)) + } + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub fn builds( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let builds: Vec = { + state + .builds + .read() + .values() + .map(|v| v.clone().into()) + .collect() + }; + construct_json_ok_response(&io::BuildsResponse::new(builds)) + } + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub fn steps( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let steps: Vec = { + state + .steps + .read() + .values() + .filter_map(std::sync::Weak::upgrade) + .map(Into::into) + .collect() + }; + construct_json_ok_response(&io::StepsResponse::new(steps)) + } + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub fn runnable( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let steps: Vec = { + state + .steps + .read() + .values() + .filter_map(std::sync::Weak::upgrade) + .filter(|v| v.get_runnable()) + .map(Into::into) + .collect() + }; + construct_json_ok_response(&io::StepsResponse::new(steps)) + } + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub async fn queues( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let queues = state + .queues + .read() + .await + .iter() + .map(|(s, q)| { + ( + s.clone(), + q.clone_inner() + .into_iter() + .filter_map(|v| v.upgrade().map(Into::into)) + .collect(), + ) + }) + .collect(); + construct_json_ok_response(&io::QueueResponse::new(queues)) + } + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub async fn queue_jobs( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let stepinfos = state + .queues + .read() + .await + .get_jobs() + .into_iter() + .map(Into::into) + .collect(); + construct_json_ok_response(&io::StepInfoResponse::new(stepinfos)) + } + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub async fn queue_scheduled( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let stepinfos = state + .queues + .read() + .await + .get_scheduled() + .into_iter() + .map(Into::into) + .collect(); + construct_json_ok_response(&io::StepInfoResponse::new(stepinfos)) + } + } + + pub mod dump_status { + use bytes::Bytes; + use http_body_util::combinators::BoxBody; + + use super::super::{Error, construct_json_ok_response}; + use 
crate::{io, state::State}; + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub async fn post( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let mut db = state.db.get().await?; + let mut tx = db.begin_transaction().await?; + tx.notify_dump_status().await?; + tx.commit().await?; + construct_json_ok_response(&io::Empty {}) + } + } + + pub mod build { + use bytes::{Buf as _, Bytes}; + use http_body_util::{BodyExt, combinators::BoxBody}; + + use super::super::{Error, construct_json_ok_response}; + use crate::{io, state::State}; + + #[tracing::instrument(skip(req, state), err)] + pub async fn put( + req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let whole_body = req.collect().await?.aggregate(); + let data: io::BuildPayload = serde_json::from_reader(whole_body.reader())?; + + state + .queue_one_build(data.jobset_id, &nix_utils::StorePath::new(&data.drv)) + .await?; + construct_json_ok_response(&io::Empty {}) + } + } + + pub mod metrics { + use bytes::Bytes; + use http_body_util::combinators::BoxBody; + + use super::super::{Error, full}; + use crate::state::State; + + #[allow(clippy::no_effect_underscore_binding)] + #[tracing::instrument(skip(_req, state), err)] + pub async fn get( + _req: hyper::Request, + state: std::sync::Arc, + ) -> Result>, Error> { + let metrics = state.metrics.gather_metrics(&state).await?; + Ok(hyper::Response::builder() + .status(hyper::StatusCode::OK) + .header( + hyper::header::CONTENT_TYPE, + "text/plain; version=0.0.4; charset=utf-8", + ) + .body(full(metrics))?) + } + } +} diff --git a/src/hydra-queue-runner-v2/queue-runner/src/server/mod.rs b/src/hydra-queue-runner-v2/queue-runner/src/server/mod.rs new file mode 100644 index 000000000..8eb789485 --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/server/mod.rs @@ -0,0 +1,2 @@ +pub mod grpc; +pub mod http; diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/atomic.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/atomic.rs new file mode 100644 index 000000000..5886e4836 --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/state/atomic.rs @@ -0,0 +1,36 @@ +use chrono::{DateTime, Utc}; +use std::sync::atomic::{AtomicI64, Ordering}; + +#[derive(Debug)] +pub struct AtomicDateTime { + inner: AtomicI64, +} + +impl Default for AtomicDateTime { + fn default() -> Self { + AtomicDateTime::new(Utc::now()) + } +} + +impl AtomicDateTime { + pub fn new(dt: DateTime) -> Self { + Self { + inner: AtomicI64::new( + dt + .timestamp_nanos_opt() + .expect("datetime not in range: 1677-09-21T00:12:43.145224192 and 2262-04-11T23:47:16.854775807."), + ), + } + } + + pub fn load(&self) -> DateTime { + let nanos = self.inner.load(Ordering::Relaxed); + DateTime::::from_timestamp_nanos(nanos) + } + + pub fn store(&self, dt: DateTime) { + if let Some(v) = dt.timestamp_nanos_opt() { + self.inner.store(v, Ordering::Relaxed); + } + } +} diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/build.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/build.rs new file mode 100644 index 000000000..7a34873aa --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/state/build.rs @@ -0,0 +1,807 @@ +#![allow(dead_code)] + +use std::{ + collections::{HashMap, HashSet}, + sync::{ + Arc, Weak, + atomic::{AtomicBool, AtomicI32, AtomicU32, Ordering}, + }, +}; + +use ahash::{AHashMap, AHashSet}; +use chrono::TimeZone; + +use super::jobset::{Jobset, JobsetID}; +use db::models::{BuildID, BuildStatus}; +use 
nix_utils::BaseStore as _; + +pub type AtomicBuildID = AtomicI32; + +#[derive(Debug)] +pub struct Build { + pub id: BuildID, + pub drv_path: nix_utils::StorePath, + pub outputs: HashMap, + pub jobset_id: JobsetID, + pub name: String, + pub timestamp: chrono::DateTime, + pub max_silent_time: i32, + pub timeout: i32, + pub local_priority: i32, + pub global_priority: AtomicI32, + + toplevel: arc_swap::ArcSwapOption, + pub jobset: Arc, + + finished_in_db: AtomicBool, +} + +impl PartialEq for Build { + fn eq(&self, other: &Self) -> bool { + self.drv_path == other.drv_path + } +} + +impl Eq for Build {} + +impl std::hash::Hash for Build { + fn hash(&self, state: &mut H) { + // ensure that drv_path is never mutable + // as we set Build as ignore-interior-mutability + self.drv_path.hash(state); + } +} + +impl Build { + pub fn new_debug(drv_path: &nix_utils::StorePath) -> Arc { + Arc::new(Self { + id: BuildID::MAX, + drv_path: drv_path.to_owned(), + outputs: HashMap::new(), + jobset_id: JobsetID::MAX, + name: "debug".into(), + timestamp: chrono::Utc::now(), + max_silent_time: i32::MAX, + timeout: i32::MAX, + local_priority: 1000, + global_priority: 1000.into(), + toplevel: arc_swap::ArcSwapOption::from(None), + jobset: Arc::new(Jobset::new(JobsetID::MAX, "debug", "debug")), + finished_in_db: false.into(), + }) + } + + #[tracing::instrument(skip(v, jobset), err)] + pub fn new(v: db::models::Build, jobset: Arc) -> anyhow::Result> { + Ok(Arc::new(Self { + id: v.id, + drv_path: nix_utils::StorePath::new(&v.drvpath), + outputs: HashMap::new(), + jobset_id: v.jobset_id, + name: v.job, + timestamp: chrono::Utc.timestamp_opt(v.timestamp, 0).single().ok_or( + anyhow::anyhow!("Failed to convert unix timestamp into chrono::UTC"), + )?, + max_silent_time: v.maxsilent.unwrap_or(3600), + timeout: v.timeout.unwrap_or(36000), + local_priority: v.priority, + global_priority: v.globalpriority.into(), + toplevel: arc_swap::ArcSwapOption::from(None), + jobset, + finished_in_db: false.into(), + })) + } + + #[inline] + pub fn full_job_name(&self) -> String { + format!( + "{}:{}:{}", + self.jobset.project_name, self.jobset.name, self.name + ) + } + + #[inline] + pub fn get_finished_in_db(&self) -> bool { + self.finished_in_db.load(Ordering::SeqCst) + } + + #[inline] + pub fn set_finished_in_db(&self, v: bool) { + self.finished_in_db.store(v, Ordering::SeqCst); + } + + #[inline] + pub fn set_toplevel_step(&self, step: Arc) { + self.toplevel.store(Some(step)); + } + + pub fn propagate_priorities(&self) { + let mut queued = AHashSet::new(); + let mut todo = std::collections::VecDeque::new(); + { + let toplevel = self.toplevel.load(); + if let Some(toplevel) = toplevel.as_ref() { + todo.push_back(toplevel.clone()); + } + } + + while let Some(step) = todo.pop_front() { + step.atomic_state.highest_global_priority.store( + std::cmp::max( + step.atomic_state + .highest_global_priority + .load(Ordering::Relaxed), + self.global_priority.load(Ordering::Relaxed), + ), + Ordering::Relaxed, + ); + step.atomic_state.highest_local_priority.store( + std::cmp::max( + step.atomic_state + .highest_local_priority + .load(Ordering::Relaxed), + self.local_priority, + ), + Ordering::Relaxed, + ); + step.atomic_state.lowest_build_id.store( + std::cmp::min( + step.atomic_state.lowest_build_id.load(Ordering::Relaxed), + self.id, + ), + Ordering::Relaxed, + ); + { + let mut state = step.state.write(); + state.jobsets.insert(self.jobset.clone()); + } + + let state = step.state.read(); + for dep in &state.deps { + if !queued.contains(dep) { + 
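// First visit only: mark the dependency as queued so shared deps are walked once.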
queued.insert(dep.clone()); + todo.push_back(dep.clone()); + } + } + } + } +} + +#[derive(Debug)] +pub struct StepAtomicState { + created: AtomicBool, // Whether the step has finished initialisation. + pub tries: AtomicU32, // Number of times we've tried this step. + pub highest_global_priority: AtomicI32, // The highest global priority of any build depending on this step. + pub highest_local_priority: AtomicI32, // The highest local priority of any build depending on this step. + + pub lowest_build_id: AtomicBuildID, // The lowest ID of any build depending on this step. + + pub after: super::AtomicDateTime, // Point in time after which the step can be retried. + pub runnable_since: super::AtomicDateTime, // The time at which this step became runnable. + pub last_supported: super::AtomicDateTime, // The time that we last saw a machine that supports this step +} + +impl StepAtomicState { + pub fn new( + after: chrono::DateTime, + runnable_since: chrono::DateTime, + ) -> Self { + Self { + created: false.into(), + tries: 0.into(), + highest_global_priority: 0.into(), + highest_local_priority: 0.into(), + lowest_build_id: BuildID::MAX.into(), + after: super::AtomicDateTime::new(after), + runnable_since: super::AtomicDateTime::new(runnable_since), + last_supported: super::AtomicDateTime::default(), + } + } + + #[inline] + pub fn get_created(&self) -> bool { + self.created.load(Ordering::SeqCst) + } + + #[inline] + pub fn set_created(&self, v: bool) { + self.created.store(v, Ordering::SeqCst); + } +} + +#[derive(Debug)] +pub struct StepState { + pub deps: HashSet>, // The build steps on which this step depends. + pub rdeps: Vec>, // The build steps that depend on this step. + pub builds: Vec>, // Builds that have this step as the top-level derivation. + pub jobsets: AHashSet>, // Jobsets to which this step belongs. Used for determining scheduling priority. 
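`propagate_priorities` above is a breadth-first walk from the build's toplevel step through `deps`, monotonically raising each step's priority fields (and lowering `lowest_build_id`) so a step inherits the strongest claim of any build that depends on it. A condensed sketch of the walk under simplified assumptions: an index-based graph instead of `Arc`-linked steps, and `fetch_max` in place of the original's Relaxed load/store pair:

```rust
use std::collections::VecDeque;
use std::sync::atomic::{AtomicI32, Ordering};

struct Node {
    priority: AtomicI32, // stands in for highest_global_priority etc.
    deps: Vec<usize>,    // edges into the dependency graph
}

fn propagate(graph: &[Node], root: usize, build_priority: i32) {
    let mut queued = vec![false; graph.len()];
    let mut todo = VecDeque::from([root]);
    queued[root] = true;
    while let Some(ix) = todo.pop_front() {
        // Monotonic update: a step's priority is the max over all builds
        // above it, so re-running the walk for another build only raises it.
        graph[ix].priority.fetch_max(build_priority, Ordering::Relaxed);
        for &dep in &graph[ix].deps {
            if !queued[dep] {
                queued[dep] = true;
                todo.push_back(dep);
            }
        }
    }
}
```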
+} + +impl StepState { + pub fn new() -> Self { + Self { + deps: HashSet::new(), + rdeps: Vec::new(), + builds: Vec::new(), + jobsets: AHashSet::new(), + } + } +} + +#[derive(Debug)] +pub struct Step { + drv_path: nix_utils::StorePath, + drv: arc_swap::ArcSwapOption, + + runnable: AtomicBool, + finished: AtomicBool, + previous_failure: AtomicBool, + pub atomic_state: StepAtomicState, + pub state: parking_lot::RwLock, +} + +impl PartialEq for Step { + fn eq(&self, other: &Self) -> bool { + self.drv_path == other.drv_path + } +} + +impl Eq for Step {} + +impl std::hash::Hash for Step { + fn hash(&self, state: &mut H) { + // ensure that drv_path is never mutable + // as we set Step as ignore-interior-mutability + self.drv_path.hash(state); + } +} + +impl Step { + pub fn new(drv_path: nix_utils::StorePath) -> Arc { + Arc::new(Self { + drv_path, + drv: arc_swap::ArcSwapOption::from(None), + runnable: false.into(), + finished: false.into(), + previous_failure: false.into(), + atomic_state: StepAtomicState::new( + chrono::DateTime::::from_timestamp_nanos(0), + chrono::DateTime::::from_timestamp_nanos(0), + ), + state: parking_lot::RwLock::new(StepState::new()), + }) + } + + #[inline] + pub fn get_drv_path(&self) -> &nix_utils::StorePath { + &self.drv_path + } + + #[inline] + pub fn get_finished(&self) -> bool { + self.finished.load(Ordering::SeqCst) + } + + #[inline] + pub fn set_finished(&self, v: bool) { + self.finished.store(v, Ordering::SeqCst); + } + + #[inline] + pub fn get_previous_failure(&self) -> bool { + self.previous_failure.load(Ordering::SeqCst) + } + + #[inline] + pub fn set_previous_failure(&self, v: bool) { + self.previous_failure.store(v, Ordering::SeqCst); + } + + #[inline] + pub fn get_runnable(&self) -> bool { + self.runnable.load(Ordering::SeqCst) + } + + pub fn set_drv(&self, drv: nix_utils::Derivation) { + self.drv.store(Some(Arc::new(drv))); + } + + pub fn get_system(&self) -> Option { + let drv = self.drv.load_full(); + drv.as_ref().map(|drv| drv.system.clone()) + } + + pub fn get_input_drvs(&self) -> Option> { + let drv = self.drv.load_full(); + drv.as_ref().map(|drv| drv.input_drvs.clone()) + } + + pub fn get_after(&self) -> chrono::DateTime { + self.atomic_state.after.load() + } + + pub fn set_after(&self, v: chrono::DateTime) { + self.atomic_state.after.store(v); + } + + pub fn get_runnable_since(&self) -> chrono::DateTime { + self.atomic_state.runnable_since.load() + } + + pub fn get_last_supported(&self) -> chrono::DateTime { + self.atomic_state.last_supported.load() + } + + pub fn set_last_supported_now(&self) { + self.atomic_state.last_supported.store(chrono::Utc::now()); + } + + pub fn get_outputs(&self) -> Option> { + let drv = self.drv.load_full(); + drv.as_ref().map(|drv| drv.outputs.clone()) + } + + pub fn get_required_features(&self) -> Vec { + let drv = self.drv.load_full(); + drv.as_ref() + .map(|drv| { + drv.env + .get_required_system_features() + .into_iter() + .map(ToOwned::to_owned) + .collect() + }) + .unwrap_or_default() + } + + #[tracing::instrument(skip(self, builds, steps))] + pub fn get_dependents( + self: &Arc, + builds: &mut AHashSet>, + steps: &mut AHashSet>, + ) { + if steps.contains(self) { + return; + } + steps.insert(self.clone()); + + let rdeps = { + let state = self.state.read(); + for b in &state.builds { + let Some(b) = b.upgrade() else { continue }; + + if !b.get_finished_in_db() { + builds.insert(b); + } + } + state.rdeps.clone() + }; + + for rdep in rdeps { + let Some(rdep) = rdep.upgrade() else { continue }; + 
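// Recurse along the reverse-dependency edges; the `steps` visited-set above stops the walk at already-seen nodes.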
rdep.get_dependents(builds, steps); + } + } + + pub fn get_deps_size(&self) -> usize { + let state = self.state.read(); + state.deps.len() + } + + pub fn make_rdeps_runnable(&self) { + if !self.get_finished() { + return; + } + + let state = self.state.read(); + for rdep in &state.rdeps { + let Some(rdep) = rdep.upgrade() else { + continue; + }; + + let mut runnable = false; + { + let mut rdep_state = rdep.state.write(); + rdep_state + .deps + .retain(|s| s.get_drv_path() != self.get_drv_path()); + if rdep_state.deps.is_empty() && rdep.atomic_state.get_created() { + runnable = true; + } + } + + if runnable { + rdep.make_runnable(); + } + } + } + + #[tracing::instrument(skip(self))] + pub fn make_runnable(&self) { + log::info!("step '{}' is now runnable", self.get_drv_path()); + debug_assert!(self.atomic_state.created.load(Ordering::SeqCst)); + debug_assert!(!self.get_finished()); + + #[cfg(debug_assertions)] + { + let state = self.state.read(); + debug_assert!(state.deps.is_empty()); + } + + self.atomic_state.runnable_since.store(chrono::Utc::now()); + self.runnable.store(true, Ordering::SeqCst); + } +} + +#[derive(Debug)] +pub enum BuildResultState { + Success, + BuildFailure, + PreparingFailure, + ImportFailure, + UploadFailure, + PostProcessingFailure, + Aborted, + Cancelled, +} + +impl From for BuildResultState { + fn from(v: crate::server::grpc::runner_v1::BuildResultState) -> Self { + match v { + crate::server::grpc::runner_v1::BuildResultState::BuildFailure => Self::BuildFailure, + crate::server::grpc::runner_v1::BuildResultState::Success => Self::Success, + crate::server::grpc::runner_v1::BuildResultState::PreparingFailure => { + Self::PreparingFailure + } + crate::server::grpc::runner_v1::BuildResultState::ImportFailure => Self::ImportFailure, + crate::server::grpc::runner_v1::BuildResultState::UploadFailure => Self::UploadFailure, + crate::server::grpc::runner_v1::BuildResultState::PostProcessingFailure => { + Self::PostProcessingFailure + } + } + } +} + +#[allow(clippy::struct_excessive_bools)] +#[derive(Debug, Clone)] +pub struct RemoteBuild { + pub step_status: BuildStatus, + pub can_retry: bool, // for bsAborted + pub is_cached: bool, // for bsSucceed + pub can_cache: bool, // for bsFailed + pub error_msg: Option, // for bsAborted + + pub times_build: i32, + pub is_non_deterministic: bool, + + pub start_time: Option>, + pub stop_time: Option>, + + pub overhead: i32, + pub log_file: String, +} + +impl RemoteBuild { + pub fn new() -> Self { + Self { + step_status: BuildStatus::Aborted, + can_retry: false, + is_cached: false, + can_cache: false, + error_msg: None, + times_build: 0, + is_non_deterministic: false, + start_time: None, + stop_time: None, + overhead: 0, + log_file: String::new(), + } + } + + pub fn get_total_step_time_ms(&self) -> u64 { + if let (Some(start_time), Some(stop_time)) = (self.start_time, self.stop_time) { + (stop_time - start_time).num_milliseconds().unsigned_abs() + } else { + 0 + } + } + + pub fn update_with_result_state(&mut self, state: &BuildResultState) { + match state { + BuildResultState::BuildFailure => { + self.can_retry = false; + } + BuildResultState::Success => (), + BuildResultState::PreparingFailure + | BuildResultState::ImportFailure + | BuildResultState::UploadFailure + | BuildResultState::PostProcessingFailure => { + self.can_retry = true; + } + BuildResultState::Aborted => { + self.can_retry = true; + self.step_status = BuildStatus::Aborted; + } + BuildResultState::Cancelled => { + self.can_retry = true; + self.step_status = 
BuildStatus::Cancelled; + } + } + } +} + +pub struct BuildProduct { + pub path: Option, + pub default_path: Option, + + pub r#type: String, + pub subtype: String, + pub name: String, + + pub is_regular: bool, + + pub sha256hash: Option, + pub file_size: Option, +} + +impl From for BuildProduct { + fn from(v: db::models::OwnedBuildProduct) -> Self { + Self { + path: v.path.map(|v| nix_utils::StorePath::new(&v)), + default_path: v.defaultpath, + r#type: v.r#type, + subtype: v.subtype, + name: v.name, + is_regular: v.filesize.is_some(), + sha256hash: v.sha256hash, + #[allow(clippy::cast_sign_loss)] + file_size: v.filesize.map(|v| v as u64), + } + } +} + +impl From for BuildProduct { + fn from(v: crate::server::grpc::runner_v1::BuildProduct) -> Self { + Self { + path: Some(nix_utils::StorePath::new(&v.path)), + default_path: Some(v.default_path), + r#type: v.r#type, + subtype: v.subtype, + name: v.name, + is_regular: v.is_regular, + sha256hash: v.sha256hash, + file_size: v.file_size, + } + } +} + +impl From for BuildProduct { + fn from(v: shared::BuildProduct) -> Self { + Self { + path: Some(nix_utils::StorePath::new(&v.path)), + default_path: Some(v.default_path), + r#type: v.r#type, + subtype: v.subtype, + name: v.name, + is_regular: v.is_regular, + sha256hash: v.sha256hash, + file_size: v.file_size, + } + } +} + +pub struct BuildMetric { + pub name: String, + pub unit: Option, + pub value: f64, +} + +impl From for BuildMetric { + fn from(v: db::models::OwnedBuildMetric) -> Self { + Self { + name: v.name, + unit: v.unit, + value: v.value, + } + } +} + +pub struct BuildOutput { + pub failed: bool, + pub import_elapsed: std::time::Duration, + pub build_elapsed: std::time::Duration, + + pub release_name: Option, + + pub closure_size: u64, + pub size: u64, + + pub products: Vec, + pub outputs: AHashMap, + pub metrics: AHashMap, +} + +impl TryFrom for BuildOutput { + type Error = anyhow::Error; + + fn try_from(v: db::models::BuildOutput) -> anyhow::Result { + let build_status = BuildStatus::from_i32( + v.buildstatus + .ok_or(anyhow::anyhow!("buildstatus missing"))?, + ) + .ok_or(anyhow::anyhow!("buildstatus did not map"))?; + Ok(Self { + failed: build_status != BuildStatus::Success, + import_elapsed: std::time::Duration::from_millis(0), + build_elapsed: std::time::Duration::from_millis(0), + release_name: v.releasename, + #[allow(clippy::cast_sign_loss)] + closure_size: v.closuresize.unwrap_or_default() as u64, + #[allow(clippy::cast_sign_loss)] + size: v.size.unwrap_or_default() as u64, + products: vec![], + outputs: AHashMap::new(), + metrics: AHashMap::new(), + }) + } +} + +impl From for BuildOutput { + fn from(v: crate::server::grpc::runner_v1::BuildResultInfo) -> Self { + let mut outputs = AHashMap::new(); + let mut closure_size = 0; + let mut nar_size = 0; + + for o in v.outputs { + match o.output { + Some(crate::server::grpc::runner_v1::output::Output::Nameonly(_)) => { + // We dont care about outputs that dont have a path, + } + Some(crate::server::grpc::runner_v1::output::Output::Withpath(o)) => { + outputs.insert(o.name, nix_utils::StorePath::new(&o.path)); + closure_size += o.closure_size; + nar_size += o.nar_size; + } + None => (), + } + } + let (failed, release_name, products, metrics) = if let Some(nix_support) = v.nix_support { + ( + nix_support.failed, + nix_support.hydra_release_name, + nix_support.products, + nix_support.metrics, + ) + } else { + (false, None, vec![], vec![]) + }; + + Self { + failed, + import_elapsed: std::time::Duration::from_millis(v.import_time_ms), + 
build_elapsed: std::time::Duration::from_millis(v.build_time_ms), + release_name, + closure_size, + size: nar_size, + products: products.into_iter().map(Into::into).collect(), + outputs, + metrics: metrics + .into_iter() + .map(|v| { + ( + v.path, + BuildMetric { + name: v.name, + unit: v.unit, + value: v.value, + }, + ) + }) + .collect(), + } + } +} + +impl BuildOutput { + #[tracing::instrument(skip(store, outputs), err)] + pub async fn new( + store: &nix_utils::LocalStore, + outputs: Vec, + ) -> anyhow::Result { + let flat_outputs = outputs + .iter() + .filter_map(|o| o.path.as_ref()) + .collect::>(); + let pathinfos = store.query_path_infos(&flat_outputs); + let nix_support = shared::parse_nix_support_from_outputs(&outputs).await?; + + let mut outputs_map = AHashMap::new(); + let mut closure_size = 0; + let mut nar_size = 0; + + for o in outputs { + if let Some(path) = o.path { + if let Some(info) = pathinfos.get(&path) { + closure_size += store.compute_closure_size(&path); + nar_size += info.nar_size; + outputs_map.insert(o.name, path); + } + } + } + + Ok(Self { + failed: nix_support.failed, + import_elapsed: std::time::Duration::from_millis(0), + build_elapsed: std::time::Duration::from_millis(0), + release_name: nix_support.hydra_release_name, + closure_size, + size: nar_size, + products: nix_support.products.into_iter().map(Into::into).collect(), + outputs: outputs_map, + metrics: nix_support + .metrics + .into_iter() + .map(|v| { + ( + v.path, + BuildMetric { + name: v.name, + unit: v.unit, + value: v.value, + }, + ) + }) + .collect(), + }) + } +} + +pub fn get_mark_build_sccuess_data<'a>( + b: &'a Arc, + res: &'a crate::state::BuildOutput, +) -> db::models::MarkBuildSuccessData<'a> { + db::models::MarkBuildSuccessData { + id: b.id, + name: &b.name, + project_name: &b.jobset.project_name, + jobset_name: &b.jobset.name, + finished_in_db: b.get_finished_in_db(), + timestamp: b.timestamp, + failed: res.failed, + closure_size: res.closure_size, + size: res.size, + release_name: res.release_name.as_deref(), + outputs: res + .outputs + .iter() + .map(|(name, path)| (name.clone(), path.get_full_path())) + .collect(), + products: res + .products + .iter() + .map(|v| db::models::BuildProduct { + r#type: &v.r#type, + subtype: &v.subtype, + filesize: v.file_size.and_then(|v| i64::try_from(v).ok()), + sha256hash: v.sha256hash.as_deref(), + path: v.path.as_ref().map(nix_utils::StorePath::get_full_path), + name: &v.name, + defaultpath: v.default_path.as_deref(), + }) + .collect(), + metrics: res + .metrics + .iter() + .map(|(name, m)| { + ( + name.as_str(), + db::models::BuildMetric { + name: &m.name, + unit: m.unit.as_deref(), + value: m.value, + }, + ) + }) + .collect(), + } +} diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/jobset.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/jobset.rs new file mode 100644 index 000000000..f01c7209c --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/state/jobset.rs @@ -0,0 +1,99 @@ +use std::{ + collections::BTreeMap, + sync::atomic::{AtomicI64, AtomicU32, Ordering}, +}; + +pub type JobsetID = i32; +pub const SCHEDULING_WINDOW: i64 = 24 * 60 * 60; + +#[derive(Debug)] +pub struct Jobset { + pub id: JobsetID, + pub project_name: String, + pub name: String, + + seconds: AtomicI64, + shares: AtomicU32, + // The start time and duration of the most recent build steps. 
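// Keyed by start time; prune_steps() evicts entries older than SCHEDULING_WINDOW and subtracts their durations from `seconds`, so share_used() only reflects the last 24 hours.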
+ steps: parking_lot::RwLock>, +} + +impl PartialEq for Jobset { + fn eq(&self, other: &Self) -> bool { + self.id == other.id && self.project_name == other.project_name && self.name == other.name + } +} + +impl Eq for Jobset {} + +impl std::hash::Hash for Jobset { + fn hash(&self, state: &mut H) { + self.id.hash(state); + self.project_name.hash(state); + self.name.hash(state); + } +} + +impl Jobset { + pub fn new>(id: JobsetID, project_name: S, name: S) -> Self { + Self { + id, + project_name: project_name.into(), + name: name.into(), + seconds: 0.into(), + shares: 0.into(), + steps: parking_lot::RwLock::new(BTreeMap::new()), + } + } + + pub fn full_name(&self) -> String { + format!("{}:{}", self.project_name, self.name) + } + + pub fn share_used(&self) -> f64 { + let seconds = self.seconds.load(Ordering::Relaxed); + let shares = self.shares.load(Ordering::Relaxed); + + // we dont care about the precision here + #[allow(clippy::cast_precision_loss)] + ((seconds as f64) / f64::from(shares)) + } + + pub fn set_shares(&self, shares: i32) -> anyhow::Result<()> { + debug_assert!(shares > 0); + self.shares.store(shares.try_into()?, Ordering::Relaxed); + Ok(()) + } + + pub fn get_shares(&self) -> u32 { + self.shares.load(Ordering::Relaxed) + } + + pub fn get_seconds(&self) -> i64 { + self.seconds.load(Ordering::Relaxed) + } + + pub fn add_step(&self, start_time: i64, duration: i64) { + let mut steps = self.steps.write(); + steps.insert(start_time, duration); + self.seconds.fetch_add(duration, Ordering::Relaxed); + } + + pub fn prune_steps(&self) { + let now = chrono::Utc::now().timestamp(); + let mut steps = self.steps.write(); + + loop { + let Some(first) = steps.first_entry() else { + break; + }; + let start_time = *first.key(); + + if start_time > now - SCHEDULING_WINDOW { + break; + } + self.seconds.fetch_sub(*first.get(), Ordering::Relaxed); + steps.remove(&start_time); + } + } +} diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/machine.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/machine.rs new file mode 100644 index 000000000..d9ae9260b --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/state/machine.rs @@ -0,0 +1,723 @@ +use std::sync::{Arc, atomic::Ordering}; + +use ahash::{AHashMap, AHashSet}; +use tokio::sync::mpsc; + +use db::models::BuildID; + +use super::System; +use super::build::RemoteBuild; +use crate::config::MachineFreeFn; +use crate::{ + config::MachineSortFn, + server::grpc::runner_v1::{AbortMessage, BuildMessage, JoinMessage, runner_request}, +}; + +#[derive(Debug, Clone, Copy)] +pub struct Pressure { + pub avg10: f32, + pub avg60: f32, + pub avg300: f32, + pub total: u64, +} + +impl Pressure { + fn new(msg: Option) -> Option { + msg.map(|v| Self { + avg10: v.avg10, + avg60: v.avg60, + avg300: v.avg300, + total: v.total, + }) + } +} + +#[derive(Debug)] +pub struct PressureState { + pub cpu_some: Option, + pub mem_some: Option, + pub mem_full: Option, + pub io_some: Option, + pub io_full: Option, + pub irq_full: Option, +} + +#[derive(Debug)] +pub struct Stats { + current_jobs: std::sync::atomic::AtomicU64, + nr_steps_done: std::sync::atomic::AtomicU64, + total_step_time_ms: std::sync::atomic::AtomicU64, + total_step_import_time_ms: std::sync::atomic::AtomicU64, + total_step_build_time_ms: std::sync::atomic::AtomicU64, + idle_since: std::sync::atomic::AtomicI64, + + last_failure: std::sync::atomic::AtomicI64, + disabled_until: std::sync::atomic::AtomicI64, + consecutive_failures: std::sync::atomic::AtomicU64, + last_ping: 
std::sync::atomic::AtomicI64, + + load1: atomic_float::AtomicF32, + load5: atomic_float::AtomicF32, + load15: atomic_float::AtomicF32, + mem_usage: std::sync::atomic::AtomicU64, + pub pressure: arc_swap::ArcSwapOption, + tmp_free_percent: atomic_float::AtomicF64, + store_free_percent: atomic_float::AtomicF64, + + pub jobs_in_last_30s_start: std::sync::atomic::AtomicI64, + pub jobs_in_last_30s_count: std::sync::atomic::AtomicU64, +} + +impl Stats { + pub fn new() -> Self { + Self { + current_jobs: 0.into(), + nr_steps_done: 0.into(), + total_step_time_ms: 0.into(), + total_step_import_time_ms: 0.into(), + total_step_build_time_ms: 0.into(), + idle_since: (chrono::Utc::now().timestamp()).into(), + last_failure: 0.into(), + disabled_until: 0.into(), + consecutive_failures: 0.into(), + last_ping: 0.into(), + + load1: 0.0.into(), + load5: 0.0.into(), + load15: 0.0.into(), + mem_usage: 0.into(), + + pressure: arc_swap::ArcSwapOption::from(None), + tmp_free_percent: 0.0.into(), + store_free_percent: 0.0.into(), + + jobs_in_last_30s_start: 0.into(), + jobs_in_last_30s_count: 0.into(), + } + } + + pub fn store_current_jobs(&self, c: u64) { + if c == 0 { + // Only stamp idle_since once when the machine becomes idle; repeated + // zero-job updates must not clear or reset the idle clock. + if self.idle_since.load(Ordering::Relaxed) == 0 { + self.idle_since + .store(chrono::Utc::now().timestamp(), Ordering::Relaxed); + } + } else { + self.idle_since.store(0, Ordering::Relaxed); + } + + self.current_jobs.store(c, Ordering::Relaxed); + } + + pub fn get_current_jobs(&self) -> u64 { + self.current_jobs.load(Ordering::Relaxed) + } + + pub fn get_nr_steps_done(&self) -> u64 { + self.nr_steps_done.load(Ordering::Relaxed) + } + + pub fn incr_nr_steps_done(&self) { + self.nr_steps_done.fetch_add(1, Ordering::Relaxed); + } + + pub fn get_total_step_time_ms(&self) -> u64 { + self.total_step_time_ms.load(Ordering::Relaxed) + } + + pub fn add_to_total_step_time_ms(&self, v: u128) { + if let Ok(v) = u64::try_from(v) { + self.total_step_time_ms.fetch_add(v, Ordering::Relaxed); + } + } + + pub fn get_total_step_build_time_ms(&self) -> u64 { + self.total_step_build_time_ms.load(Ordering::Relaxed) + } + + pub fn add_to_total_step_build_time_ms(&self, v: u128) { + if let Ok(v) = u64::try_from(v) { + self.total_step_build_time_ms + .fetch_add(v, Ordering::Relaxed); + } + } + + pub fn get_total_step_import_time_ms(&self) -> u64 { + self.total_step_import_time_ms.load(Ordering::Relaxed) + } + + pub fn add_to_total_step_import_time_ms(&self, v: u128) { + if let Ok(v) = u64::try_from(v) { + self.total_step_import_time_ms + .fetch_add(v, Ordering::Relaxed); + } + } + + pub fn get_idle_since(&self) -> i64 { + self.idle_since.load(Ordering::Relaxed) + } + + pub fn get_last_failure(&self) -> i64 { + self.last_failure.load(Ordering::Relaxed) + } + + pub fn store_last_failure_now(&self) { + self.last_failure + .store(chrono::Utc::now().timestamp(), Ordering::Relaxed); + self.consecutive_failures.fetch_add(1, Ordering::Relaxed); + } + + pub fn get_disabled_until(&self) -> i64 { + self.disabled_until.load(Ordering::Relaxed) + } + + pub fn get_consecutive_failures(&self) -> u64 { + self.consecutive_failures.load(Ordering::Relaxed) + } + + pub fn reset_consecutive_failures(&self) { + self.consecutive_failures.store(0, Ordering::Relaxed); + } + + pub fn get_last_ping(&self) -> i64 { + self.last_ping.load(Ordering::Relaxed) + } + + pub fn store_ping(&self, msg: &crate::server::grpc::runner_v1::PingMessage) { + self.last_ping + .store(chrono::Utc::now().timestamp(), Ordering::Relaxed); + + self.load1.store(msg.load1, Ordering::Relaxed); + self.load5.store(msg.load5,
Ordering::Relaxed); + self.load15.store(msg.load15, Ordering::Relaxed); + self.mem_usage.store(msg.mem_usage, Ordering::Relaxed); + + if let Some(p) = msg.pressure { + self.pressure.store(Some(Arc::new(PressureState { + cpu_some: Pressure::new(p.cpu_some), + mem_some: Pressure::new(p.mem_some), + mem_full: Pressure::new(p.mem_full), + io_some: Pressure::new(p.io_some), + io_full: Pressure::new(p.io_full), + irq_full: Pressure::new(p.irq_full), + }))); + } + + self.tmp_free_percent + .store(msg.tmp_free_percent, Ordering::Relaxed); + self.store_free_percent + .store(msg.store_free_percent, Ordering::Relaxed); + } + + pub fn get_load1(&self) -> f32 { + self.load1.load(Ordering::Relaxed) + } + + pub fn get_load5(&self) -> f32 { + self.load5.load(Ordering::Relaxed) + } + + pub fn get_load15(&self) -> f32 { + self.load15.load(Ordering::Relaxed) + } + + pub fn get_mem_usage(&self) -> u64 { + self.mem_usage.load(Ordering::Relaxed) + } + + pub fn get_tmp_free_percent(&self) -> f64 { + self.tmp_free_percent.load(Ordering::Relaxed) + } + + pub fn get_store_free_percent(&self) -> f64 { + self.store_free_percent.load(Ordering::Relaxed) + } +} + +struct MachinesInner { + by_uuid: AHashMap>, + // by_system is always sorted, as we insert sorted based on cpu score + by_system: AHashMap>>, +} + +impl MachinesInner { + fn sort(&mut self, sort_fn: MachineSortFn) { + for machines in self.by_system.values_mut() { + machines.sort_by(|a, b| { + let r = a.score(sort_fn).total_cmp(&b.score(sort_fn)).reverse(); + if r.is_eq() { + // if score is the same then we do a tiebreaker on current jobs + a.stats.get_current_jobs().cmp(&b.stats.get_current_jobs()) + } else { + r + } + }); + } + } +} + +pub struct Machines { + inner: parking_lot::RwLock, + supported_features: parking_lot::RwLock>, +} + +impl Machines { + pub fn new() -> Self { + Self { + inner: parking_lot::RwLock::new(MachinesInner { + by_uuid: AHashMap::new(), + by_system: AHashMap::new(), + }), + supported_features: parking_lot::RwLock::new(AHashSet::new()), + } + } + + pub fn sort(&self, sort_fn: MachineSortFn) { + let mut inner = self.inner.write(); + inner.sort(sort_fn); + } + + pub fn get_supported_features(&self) -> Vec { + let supported_features = self.supported_features.read(); + supported_features.iter().cloned().collect() + } + + pub fn support_step(&self, s: &Arc) -> bool { + // dup of machines.get_machine_for_system + let inner = self.inner.read(); + let Some(system) = s.get_system() else { + return false; + }; + let features = s.get_required_features(); + if system == "builtin" { + inner + .by_uuid + .values() + .any(|m| m.supports_all_features(&features)) + } else { + inner + .by_system + .get(&system) + .is_some_and(|v| v.iter().any(|m| m.supports_all_features(&features))) + } + } + + #[allow(dead_code)] + fn has_supported_features(&self, required_features: &[String]) -> bool { + let supported_features = self.supported_features.read(); + required_features + .iter() + .all(|f| supported_features.contains(f)) + } + + fn reconstruct_supported_features(&self) { + let all_supported_features = { + let inner = self.inner.read(); + inner + .by_uuid + .values() + .flat_map(|m| m.supported_features.clone()) + .collect::>() + }; + + { + let mut supported_features = self.supported_features.write(); + *supported_features = all_supported_features; + } + } + + #[tracing::instrument(skip(self, machine, sort_fn))] + pub fn insert_machine(&self, machine: Machine, sort_fn: MachineSortFn) -> uuid::Uuid { + let machine_id = machine.id; + { + let mut inner = 
self.inner.write(); + let machine = Arc::new(machine); + + inner.by_uuid.insert(machine_id, machine.clone()); + { + for system in &machine.systems { + let v = inner.by_system.entry(system.clone()).or_default(); + v.push(machine.clone()); + } + } + inner.sort(sort_fn); + } + self.reconstruct_supported_features(); + machine_id + } + + #[tracing::instrument(skip(self, machine_id))] + pub fn remove_machine(&self, machine_id: uuid::Uuid) -> Option> { + let m = { + let mut inner = self.inner.write(); + if let Some(m) = inner.by_uuid.remove(&machine_id) { + for system in &m.systems { + if let Some(v) = inner.by_system.get_mut(system) { + v.retain(|o| o.id != machine_id); + } + } + Some(m) + } else { + None + } + }; + self.reconstruct_supported_features(); + m + } + + #[tracing::instrument(skip(self, machine_id))] + pub fn get_machine_by_id(&self, machine_id: uuid::Uuid) -> Option> { + let inner = self.inner.read(); + inner.by_uuid.get(&machine_id).cloned() + } + + #[tracing::instrument(skip(self, system))] + pub fn get_machine_for_system( + &self, + system: &str, + required_features: &[String], + free_fn: MachineFreeFn, + ) -> Option> { + // dup of machines.support_step + let inner = self.inner.read(); + if system == "builtin" { + inner + .by_uuid + .values() + .find(|m| m.has_capacity(free_fn) && m.supports_all_features(required_features)) + .cloned() + } else { + inner.by_system.get(system).and_then(|machines| { + machines + .iter() + .find(|m| m.has_capacity(free_fn) && m.supports_all_features(required_features)) + .cloned() + }) + } + } + + #[tracing::instrument(skip(self))] + pub fn get_all_machines(&self) -> Vec> { + let inner = self.inner.read(); + inner.by_uuid.values().cloned().collect() + } + + #[tracing::instrument(skip(self))] + pub fn get_machine_count(&self) -> usize { + self.inner.read().by_uuid.len() + } + + #[tracing::instrument(skip(self))] + pub fn get_machine_count_in_use(&self) -> usize { + self.inner + .read() + .by_uuid + .iter() + .filter(|(_, v)| v.stats.get_current_jobs() > 0) + .count() + } +} + +#[derive(Debug, Clone)] +pub struct Job { + pub path: nix_utils::StorePath, + pub resolved_drv: Option, + pub build_id: BuildID, + pub step_nr: i32, + pub result: RemoteBuild, +} + +impl Job { + pub fn new( + build_id: BuildID, + path: nix_utils::StorePath, + resolved_drv: Option, + ) -> Self { + Self { + path, + resolved_drv, + build_id, + step_nr: 0, + result: RemoteBuild::new(), + } + } +} + +pub enum Message { + BuildMessage { + drv: nix_utils::StorePath, + resolved_drv: Option, + max_log_size: u64, + max_silent_time: i32, + build_timeout: i32, + }, + AbortMessage { + drv: nix_utils::StorePath, + }, +} + +impl Message { + pub fn into_request(self) -> crate::server::grpc::runner_v1::RunnerRequest { + let msg = match self { + Message::BuildMessage { + drv, + resolved_drv, + max_log_size, + max_silent_time, + build_timeout, + } => runner_request::Message::Build(BuildMessage { + drv: drv.into_base_name(), + resolved_drv: resolved_drv.map(nix_utils::StorePath::into_base_name), + max_log_size, + max_silent_time, + build_timeout, + }), + Message::AbortMessage { drv } => runner_request::Message::Abort(AbortMessage { + drv: drv.into_base_name(), + }), + }; + + crate::server::grpc::runner_v1::RunnerRequest { message: Some(msg) } + } +} + +#[derive(Debug, Clone)] +pub struct Machine { + pub id: uuid::Uuid, + pub systems: Vec, + pub hostname: String, + pub cpu_count: u32, + pub bogomips: f32, + pub speed_factor: f32, + pub max_jobs: u32, + pub tmp_avail_threshold: f64, + pub 
store_avail_threshold: f64, + pub load1_threshold: f32, + pub cpu_psi_threshold: f32, + pub mem_psi_threshold: f32, + pub io_psi_threshold: Option, // If None, don't consider this value + pub total_mem: u64, + pub supported_features: Vec, + pub mandatory_features: Vec, + pub cgroups: bool, + pub joined_at: chrono::DateTime, + + msg_queue: mpsc::Sender, + pub stats: Arc, + pub jobs: Arc>>, +} + +impl std::fmt::Display for Machine { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "Machine: [systems={:?} hostname={} cpu_count={} bogomips={:.2} speed_factor={:.2} max_jobs={} total_mem={:.2} supported_features={:?} cgroups={} joined_at={}]", + self.systems, + self.hostname, + self.cpu_count, + self.bogomips, + self.speed_factor, + self.max_jobs, + byte_unit::Byte::from_u64(self.total_mem).get_adjusted_unit(byte_unit::Unit::GB), + self.supported_features, + self.cgroups, + self.joined_at, + ) + } +} + +impl Machine { + pub fn new(msg: JoinMessage, tx: mpsc::Sender) -> anyhow::Result { + Ok(Self { + id: msg.machine_id.parse()?, + systems: msg.systems, + hostname: msg.hostname, + cpu_count: msg.cpu_count, + bogomips: msg.bogomips, + speed_factor: msg.speed_factor, + max_jobs: msg.max_jobs, + tmp_avail_threshold: msg.tmp_avail_threshold.into(), + store_avail_threshold: msg.store_avail_threshold.into(), + load1_threshold: msg.load1_threshold, + cpu_psi_threshold: msg.cpu_psi_threshold, + mem_psi_threshold: msg.mem_psi_threshold, + io_psi_threshold: msg.io_psi_threshold, + total_mem: msg.total_mem, + supported_features: msg.supported_features, + mandatory_features: msg.mandatory_features, + cgroups: msg.cgroups, + msg_queue: tx, + joined_at: chrono::Utc::now(), + stats: Arc::new(Stats::new()), + jobs: Arc::new(parking_lot::RwLock::new(Vec::new())), + }) + } + + #[tracing::instrument(skip(self, job, opts), err)] + pub async fn build_drv(&self, job: Job, opts: &nix_utils::BuildOptions) -> anyhow::Result<()> { + let drv = job.path.clone(); + self.msg_queue + .send(Message::BuildMessage { + drv, + resolved_drv: job.resolved_drv.clone(), + max_log_size: opts.get_max_log_size(), + max_silent_time: opts.get_max_silent_time(), + build_timeout: opts.get_build_timeout(), + }) + .await?; + + if self.stats.jobs_in_last_30s_count.load(Ordering::Relaxed) == 0 { + self.stats + .jobs_in_last_30s_start + .store(chrono::Utc::now().timestamp(), Ordering::Relaxed); + } + + self.insert_job(job); + self.stats + .jobs_in_last_30s_count + .fetch_add(1, Ordering::Relaxed); + + Ok(()) + } + + #[tracing::instrument(skip(self), fields(%drv), err)] + pub async fn abort_build(&self, drv: &nix_utils::StorePath) -> anyhow::Result<()> { + self.msg_queue + .send(Message::AbortMessage { + drv: drv.to_owned(), + }) + .await?; + + self.remove_job(drv); + Ok(()) + } + + pub fn has_dynamic_capacity(&self) -> bool { + let pressure = self.stats.pressure.load(); + + if let Some(cpu_some) = pressure.as_ref().and_then(|v| v.cpu_some) { + if cpu_some.avg10 > self.cpu_psi_threshold { + return false; + } + if let Some(mem_full) = pressure.as_ref().and_then(|v| v.mem_full) { + if mem_full.avg10 > self.mem_psi_threshold { + return false; + } + } + if let Some(threshold) = self.io_psi_threshold { + if let Some(io_full) = pressure.as_ref().and_then(|v| v.io_full) { + if io_full.avg10 > threshold { + return false; + } + } + } + } else if self.stats.get_load1() > self.load1_threshold { + return false; + } + + true + } + + pub fn has_static_capacity(&self) -> bool {
self.stats.get_current_jobs() < u64::from(self.max_jobs) + } + + pub fn has_capacity(&self, free_fn: MachineFreeFn) -> bool { + let now = chrono::Utc::now().timestamp(); + let jobs_in_last_30s_start = self.stats.jobs_in_last_30s_start.load(Ordering::Relaxed); + let jobs_in_last_30s_count = self.stats.jobs_in_last_30s_count.load(Ordering::Relaxed); + + // ensure that we don't submit more than 4 jobs in 30s + if now <= (jobs_in_last_30s_start + 30) + && jobs_in_last_30s_count >= 4 + // ensure that we haven't already finished some of them, because then it's fine again + && self.stats.get_current_jobs() >= 4 + { + return false; + } else if now > (jobs_in_last_30s_start + 30) { + // reset count + self.stats + .jobs_in_last_30s_start + .store(0, Ordering::Relaxed); + self.stats + .jobs_in_last_30s_count + .store(0, Ordering::Relaxed); + } + + if self.stats.get_tmp_free_percent() < self.tmp_avail_threshold { + return false; + } + + if self.stats.get_store_free_percent() < self.store_avail_threshold { + return false; + } + + match free_fn { + MachineFreeFn::Dynamic => self.has_dynamic_capacity(), + MachineFreeFn::DynamicWithMaxJobLimit => { + self.has_dynamic_capacity() && self.has_static_capacity() + } + MachineFreeFn::Static => self.has_static_capacity(), + } + } + + pub fn supports_all_features(&self, features: &[String]) -> bool { + // TODO: mandatory features + features.iter().all(|f| self.supported_features.contains(f)) + } + + pub fn score(&self, sort_fn: MachineSortFn) -> f32 { + match sort_fn { + MachineSortFn::SpeedFactorOnly => self.speed_factor, + MachineSortFn::CpuCoreCountWithSpeedFactor => + { + #[allow(clippy::cast_precision_loss)] + (self.speed_factor * (self.cpu_count as f32)) + } + MachineSortFn::BogomipsWithSpeedFactor => { + let bogomips = if self.bogomips > 1.
{ + self.bogomips + } else { + 1.0 + }; + #[allow(clippy::cast_precision_loss)] + (self.speed_factor * bogomips * (self.cpu_count as f32)) + } + } + } + + #[tracing::instrument(skip(self), fields(%drv))] + pub fn get_build_id_and_step_nr(&self, drv: &nix_utils::StorePath) -> Option<(i32, i32)> { + let jobs = self.jobs.read(); + let job = jobs.iter().find(|j| &j.path == drv).cloned(); + job.map(|j| (j.build_id, j.step_nr)) + } + + #[tracing::instrument(skip(self, job))] + fn insert_job(&self, job: Job) { + let mut jobs = self.jobs.write(); + jobs.push(job); + self.stats.store_current_jobs(jobs.len() as u64); + } + + #[tracing::instrument(skip(self), fields(%drv))] + pub fn remove_job(&self, drv: &nix_utils::StorePath) -> Option { + let mut jobs = self.jobs.write(); + let job = jobs.iter().find(|j| &j.path == drv).cloned(); + jobs.retain(|j| &j.path != drv); + self.stats.store_current_jobs(jobs.len() as u64); + self.stats.incr_nr_steps_done(); + + { + // if build finished fast we can subtract 1 here + let now = chrono::Utc::now().timestamp(); + let jobs_in_last_30s_start = self.stats.jobs_in_last_30s_start.load(Ordering::Relaxed); + + if now <= (jobs_in_last_30s_start + 30) { + self.stats + .jobs_in_last_30s_count + .fetch_sub(1, Ordering::Relaxed); + } + } + + job + } +} diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/metrics.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/metrics.rs new file mode 100644 index 000000000..d70f4e835 --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/state/metrics.rs @@ -0,0 +1,383 @@ +use std::sync::Arc; + +use prometheus::Encoder; + +#[derive(Debug)] +pub struct PromMetrics { + pub registry: prometheus::Registry, + + pub queue_checks_started: prometheus::IntCounter, + pub queue_build_loads: prometheus::IntCounter, + pub queue_steps_created: prometheus::IntCounter, + pub queue_checks_early_exits: prometheus::IntCounter, + pub queue_checks_finished: prometheus::IntCounter, // TODO + + pub dispatcher_time_spent_running: prometheus::IntCounter, + pub dispatcher_time_spent_waiting: prometheus::IntCounter, + + pub queue_monitor_time_spent_running: prometheus::IntCounter, + pub queue_monitor_time_spent_waiting: prometheus::IntCounter, + + pub nr_builds_read: prometheus::IntGauge, // hydra_queue_builds_read + pub build_read_time_ms: prometheus::IntGauge, // hydra_queue_builds_time + pub nr_builds_unfinished: prometheus::IntGauge, // hydra_queue_builds_unfinished + pub nr_builds_done: prometheus::IntGauge, // hydra_queue_builds_finished + pub nr_steps_started: prometheus::IntGauge, // hydra_queue_steps_started + pub nr_steps_done: prometheus::IntGauge, // hydra_queue_steps_finished + pub nr_steps_building: prometheus::IntGauge, // hydra_queue_steps_building + pub nr_steps_waiting: prometheus::IntGauge, // hydra_queue_steps_waiting + pub nr_steps_runnable: prometheus::IntGauge, // hydra_queue_steps_runnable + pub nr_steps_unfinished: prometheus::IntGauge, // hydra_queue_steps_unfinished + pub nr_unsupported_steps: prometheus::IntGauge, // hydra_queue_steps_unsupported + pub nr_unsupported_steps_aborted: prometheus::IntGauge, // hydra_queue_steps_unsupported_aborted + pub nr_substitutes_started: prometheus::IntGauge, // hydra_queue_substitutes_started + pub nr_substitutes_failed: prometheus::IntGauge, // hydra_queue_substitutes_failed + pub nr_substitutes_succeeded: prometheus::IntGauge, // hydra_queue_substitutes_succeeded + pub nr_retries: prometheus::IntGauge, // hydra_queue_steps_retries + pub max_nr_retries: prometheus::IntGauge, // 
hydra_queue_steps_max_retries + pub avg_step_time_ms: prometheus::IntGauge, // hydra_queue_steps_avg_total_time + pub avg_step_import_time_ms: prometheus::IntGauge, // hydra_queue_steps_avg_import_time + pub avg_step_build_time_ms: prometheus::IntGauge, // hydra_queue_steps_avg_build_time + pub total_step_time_ms: prometheus::IntGauge, // hydra_queue_steps_total_time + pub total_step_import_time_ms: prometheus::IntGauge, // hydra_queue_steps_total_import_time + pub total_step_build_time_ms: prometheus::IntGauge, // hydra_queue_steps_total_build_time + pub nr_queue_wakeups: prometheus::IntGauge, //hydra_queue_checks + pub nr_dispatcher_wakeups: prometheus::IntGauge, // hydra_queue_dispatch_wakeup + pub dispatch_time_ms: prometheus::IntGauge, // hydra_queue_dispatch_time + pub machines_total: prometheus::IntGauge, // hydra_queue_machines_total + pub machines_in_use: prometheus::IntGauge, // hydra_queue_machines_in_use + pub runnable_per_machine_type: prometheus::IntGaugeVec, // hydra_queue_machines_runnable + pub running_per_machine_type: prometheus::IntGaugeVec, // hydra_queue_machines_running +} + +impl PromMetrics { + #[allow(clippy::too_many_lines)] + pub fn new() -> anyhow::Result { + let queue_checks_started = prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_queue_checks_started_total", + "Number of times State::get_queued_builds() was started", + ))?; + let queue_build_loads = prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_queue_build_loads_total", + "Number of builds loaded", + ))?; + let queue_steps_created = prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_queue_steps_created_total", + "Number of steps created", + ))?; + let queue_checks_early_exits = prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_queue_checks_early_exits_total", + "Number of times State::get_queued_builds() yielded to potential bumps", + ))?; + let queue_checks_finished = prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_queue_checks_finished_total", + "Number of times State::get_queued_builds() was completed", + ))?; + let dispatcher_time_spent_running = + prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_dispatcher_time_spent_running", + "Time (in micros) spent running the dispatcher", + ))?; + let dispatcher_time_spent_waiting = + prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_dispatcher_time_spent_waiting", + "Time (in micros) spent waiting for the dispatcher to obtain work", + ))?; + let queue_monitor_time_spent_running = + prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_queue_monitor_time_spent_running", + "Time (in micros) spent running the queue monitor", + ))?; + let queue_monitor_time_spent_waiting = + prometheus::IntCounter::with_opts(prometheus::Opts::new( + "hydraqueuerunner_queue_monitor_time_spent_waiting", + "Time (in micros) spent waiting for the queue monitor to obtain work", + ))?; + + let nr_builds_read = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_builds_read", + "hydra_queue_builds_read", + ))?; + let build_read_time_ms = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_builds_time", + "hydra_queue_builds_time", + ))?; + let nr_builds_unfinished = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_builds_unfinished", + "hydra_queue_builds_unfinished", + ))?; + let nr_builds_done = 
prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_builds_finished", + "hydra_queue_builds_finished", + ))?; + let nr_steps_started = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_started", + "hydra_queue_steps_started", + ))?; + let nr_steps_done = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_finished", + "hydra_queue_steps_finished", + ))?; + let nr_steps_building = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_building", + "hydra_queue_steps_building", + ))?; + let nr_steps_waiting = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_waiting", + "hydra_queue_steps_waiting", + ))?; + let nr_steps_runnable = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_runnable", + "hydra_queue_steps_runnable", + ))?; + let nr_steps_unfinished = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_unfinished", + "hydra_queue_steps_unfinished", + ))?; + let nr_unsupported_steps = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_unsupported", + "hydra_queue_steps_unsupported", + ))?; + let nr_unsupported_steps_aborted = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_unsupported_aborted", + "hydra_queue_steps_unsupported_aborted", + ))?; + let nr_substitutes_started = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_substitutes_started", + "hydra_queue_substitutes_started", + ))?; + let nr_substitutes_failed = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_substitutes_failed", + "hydra_queue_substitutes_failed", + ))?; + let nr_substitutes_succeeded = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_substitutes_succeeded", + "hydra_queue_substitutes_succeeded", + ))?; + let nr_retries = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_retries", + "hydra_queue_steps_retries", + ))?; + let max_nr_retries = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_max_retries", + "hydra_queue_steps_max_retries", + ))?; + let avg_step_time_ms = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_avg_time_ms", + "hydra_queue_steps_avg_time_ms", + ))?; + let avg_step_import_time_ms = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_avg_import_time_ms", + "hydra_queue_steps_avg_import_time_ms", + ))?; + let avg_step_build_time_ms = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_avg_build_time_ms", + "hydra_queue_steps_avg_build_time_ms", + ))?; + let total_step_time_ms = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_total_time_ms", + "hydra_queue_steps_total_time_ms", + ))?; + let total_step_import_time_ms = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_total_import_time_ms", + "hydra_queue_steps_total_import_time_ms", + ))?; + let total_step_build_time_ms = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_steps_total_build_time_ms", + "hydra_queue_steps_total_build_time_ms", + ))?; + let nr_queue_wakeups = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_checks", + "hydra_queue_checks", + ))?; + let nr_dispatcher_wakeups = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_dispatch_wakeup", + "hydra_queue_dispatch_wakeup", + ))?; + let dispatch_time_ms = 
prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_dispatch_time", + "hydra_queue_dispatch_time", + ))?; + let machines_total = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_machines_total", + "hydra_queue_machines_total", + ))?; + let machines_in_use = prometheus::IntGauge::with_opts(prometheus::Opts::new( + "hydra_queue_machines_in_use", + "hydra_queue_machines_in_use", + ))?; + let runnable_per_machine_type = prometheus::IntGaugeVec::new( + prometheus::Opts::new( + "hydra_queue_machines_runnable", + "hydra_queue_machines_runnable", + ), + &["machine_type"], + )?; + let running_per_machine_type = prometheus::IntGaugeVec::new( + prometheus::Opts::new( + "hydra_queue_machines_running", + "hydra_queue_machines_running", + ), + &["machine_type"], + )?; + + // TODO: per machine metrics + + let r = prometheus::Registry::new(); + r.register(Box::new(queue_checks_started.clone()))?; + r.register(Box::new(queue_build_loads.clone()))?; + r.register(Box::new(queue_steps_created.clone()))?; + r.register(Box::new(queue_checks_early_exits.clone()))?; + r.register(Box::new(queue_checks_finished.clone()))?; + r.register(Box::new(dispatcher_time_spent_running.clone()))?; + r.register(Box::new(dispatcher_time_spent_waiting.clone()))?; + r.register(Box::new(queue_monitor_time_spent_running.clone()))?; + r.register(Box::new(queue_monitor_time_spent_waiting.clone()))?; + r.register(Box::new(nr_builds_read.clone()))?; + r.register(Box::new(build_read_time_ms.clone()))?; + r.register(Box::new(nr_builds_unfinished.clone()))?; + r.register(Box::new(nr_builds_done.clone()))?; + r.register(Box::new(nr_steps_started.clone()))?; + r.register(Box::new(nr_steps_done.clone()))?; + r.register(Box::new(nr_steps_building.clone()))?; + r.register(Box::new(nr_steps_waiting.clone()))?; + r.register(Box::new(nr_steps_runnable.clone()))?; + r.register(Box::new(nr_steps_unfinished.clone()))?; + r.register(Box::new(nr_unsupported_steps.clone()))?; + r.register(Box::new(nr_unsupported_steps_aborted.clone()))?; + r.register(Box::new(nr_substitutes_started.clone()))?; + r.register(Box::new(nr_substitutes_failed.clone()))?; + r.register(Box::new(nr_substitutes_succeeded.clone()))?; + r.register(Box::new(nr_retries.clone()))?; + r.register(Box::new(max_nr_retries.clone()))?; + r.register(Box::new(avg_step_time_ms.clone()))?; + r.register(Box::new(avg_step_import_time_ms.clone()))?; + r.register(Box::new(avg_step_build_time_ms.clone()))?; + r.register(Box::new(total_step_time_ms.clone()))?; + r.register(Box::new(total_step_import_time_ms.clone()))?; + r.register(Box::new(total_step_build_time_ms.clone()))?; + r.register(Box::new(nr_queue_wakeups.clone()))?; + r.register(Box::new(nr_dispatcher_wakeups.clone()))?; + r.register(Box::new(dispatch_time_ms.clone()))?; + r.register(Box::new(machines_total.clone()))?; + r.register(Box::new(machines_in_use.clone()))?; + r.register(Box::new(runnable_per_machine_type.clone()))?; + r.register(Box::new(running_per_machine_type.clone()))?; + + Ok(Self { + registry: r, + queue_checks_started, + queue_build_loads, + queue_steps_created, + queue_checks_early_exits, + queue_checks_finished, + dispatcher_time_spent_running, + dispatcher_time_spent_waiting, + queue_monitor_time_spent_running, + queue_monitor_time_spent_waiting, + nr_builds_read, + build_read_time_ms, + nr_builds_done, + nr_builds_unfinished, + nr_steps_started, + nr_steps_done, + nr_steps_building, + nr_steps_waiting, + nr_steps_runnable, + nr_steps_unfinished, + nr_unsupported_steps, + 
nr_unsupported_steps_aborted, + nr_substitutes_started, + nr_substitutes_failed, + nr_substitutes_succeeded, + nr_retries, + max_nr_retries, + avg_step_time_ms, + avg_step_import_time_ms, + avg_step_build_time_ms, + total_step_time_ms, + total_step_import_time_ms, + total_step_build_time_ms, + nr_queue_wakeups, + nr_dispatcher_wakeups, + dispatch_time_ms, + machines_total, + machines_in_use, + runnable_per_machine_type, + running_per_machine_type, + }) + } + + pub async fn refresh_dynamic_metrics(&self, state: &Arc) { + let nr_steps_done = self.nr_steps_done.get(); + if nr_steps_done > 0 { + self.avg_step_time_ms + .set(self.total_step_time_ms.get() / nr_steps_done); + self.avg_step_import_time_ms + .set(self.total_step_import_time_ms.get() / nr_steps_done); + self.avg_step_build_time_ms + .set(self.total_step_build_time_ms.get() / nr_steps_done); + } + + if let Ok(v) = i64::try_from(state.get_nr_builds_unfinished()) { + self.nr_builds_unfinished.set(v); + } + if let Ok(v) = i64::try_from(state.get_nr_steps_unfinished()) { + self.nr_steps_unfinished.set(v); + } + if let Ok(v) = i64::try_from(state.get_nr_runnable()) { + self.nr_steps_runnable.set(v); + } + if let Ok(v) = i64::try_from(state.machines.get_machine_count()) { + self.machines_total.set(v); + } + if let Ok(v) = i64::try_from(state.machines.get_machine_count_in_use()) { + self.machines_in_use.set(v); + } + + { + let queue_stats = state.queues.read().await.get_stats_per_queue(); + self.runnable_per_machine_type.reset(); + self.running_per_machine_type.reset(); + for (t, s) in queue_stats { + if let Ok(v) = i64::try_from(s.total_runnable) { + self.runnable_per_machine_type + .with_label_values(&[t.clone()]) + .set(v); + } + if let Ok(v) = i64::try_from(s.active_runnable) { + self.running_per_machine_type.with_label_values(&[t]).set(v); + } + } + } + } + + pub async fn gather_metrics(&self, state: &Arc) -> anyhow::Result> { + self.refresh_dynamic_metrics(state).await; + + let mut buffer = Vec::new(); + let encoder = prometheus::TextEncoder::new(); + let metric_families = self.registry.gather(); + encoder.encode(&metric_families, &mut buffer)?; + + Ok(buffer) + } + + pub fn add_to_total_step_time_ms(&self, v: u128) { + if let Ok(v) = i64::try_from(v) { + self.total_step_time_ms.add(v); + } + } + + pub fn add_to_total_step_import_time_ms(&self, v: u128) { + if let Ok(v) = i64::try_from(v) { + self.total_step_import_time_ms.add(v); + } + } + + pub fn add_to_total_step_build_time_ms(&self, v: u128) { + if let Ok(v) = i64::try_from(v) { + self.total_step_build_time_ms.add(v); + } + } +} diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/mod.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/mod.rs new file mode 100644 index 000000000..22438e433 --- /dev/null +++ b/src/hydra-queue-runner-v2/queue-runner/src/state/mod.rs @@ -0,0 +1,2158 @@ +mod atomic; +mod build; +mod jobset; +mod machine; +mod metrics; +mod queue; +mod uploader; + +pub use atomic::AtomicDateTime; +pub use build::{Build, BuildOutput, BuildResultState, RemoteBuild, Step}; +pub use jobset::{Jobset, JobsetID}; +pub use machine::{Machine, Message as MachineMessage, Pressure, Stats as MachineStats}; +pub use queue::{BuildQueueStats, StepInfo}; + +use std::sync::atomic::{AtomicI64, Ordering}; +use std::time::Instant; +use std::{sync::Arc, sync::Weak}; + +use ahash::{AHashMap, AHashSet}; +use db::models::{BuildID, BuildStatus}; +use futures::TryStreamExt as _; +use nix_utils::BaseStore as _; +use secrecy::ExposeSecret as _; + +use crate::config::{App, Args}; +use 
crate::state::build::get_mark_build_sccuess_data;
+use crate::state::jobset::SCHEDULING_WINDOW;
+use crate::utils::finish_build_step;
+use machine::Machines;
+
+pub type System = String;
+
+enum CreateStepResult {
+    None,
+    Valid(Arc<Step>),
+    PreviousFailure(Arc<Step>),
+}
+
+enum RealiseStepResult {
+    None,
+    Valid(Arc<Machine>),
+    MaybeCancelled,
+    CachedFailure,
+}
+
+pub struct State {
+    pub store: nix_utils::LocalStore,
+    pub remote_stores: parking_lot::RwLock<Vec<nix_utils::RemoteStore>>,
+    pub config: App,
+    pub args: Args,
+    pub db: db::Database,
+
+    pub machines: Machines,
+
+    pub log_dir: std::path::PathBuf,
+
+    // hardcoded values from the old queue runner
+    // pub maxParallelCopyClosure: u32 = 4;
+    // pub maxUnsupportedTime: u32 = 0;
+    pub builds: parking_lot::RwLock<AHashMap<BuildID, Arc<Build>>>,
+    // keyed by (project name, jobset name)
+    pub jobsets: parking_lot::RwLock<AHashMap<(String, String), Arc<Jobset>>>,
+    pub steps: parking_lot::RwLock<AHashMap<nix_utils::StorePath, Weak<Step>>>,
+    pub queues: tokio::sync::RwLock<queue::Queues>,
+
+    pub started_at: chrono::DateTime<chrono::Utc>,
+
+    pub metrics: metrics::PromMetrics,
+    pub notify_dispatch: tokio::sync::Notify,
+    pub uploader: uploader::Uploader,
+}
+
+impl State {
+    pub async fn new(
+        reload_handle: tracing_subscriber::reload::Handle<
+            tracing_subscriber::EnvFilter,
+            tracing_subscriber::Registry,
+        >,
+    ) -> anyhow::Result<Arc<Self>> {
+        let store = nix_utils::LocalStore::init();
+        nix_utils::set_verbosity(1);
+        let args = Args::new();
+        if args.status {
+            let _ = reload_handle
+                .modify(|filter| *filter = tracing_subscriber::filter::EnvFilter::new("error"));
+        }
+
+        let config = App::init(&args.config_path)?;
+        let log_dir = config.get_hydra_log_dir();
+        let db = db::Database::new(
+            config.get_db_url().expose_secret(),
+            config.get_max_db_connections(),
+        )
+        .await?;
+
+        let _ = tokio::fs::create_dir_all(&log_dir).await;
+        Ok(Arc::new(Self {
+            store,
+            remote_stores: parking_lot::RwLock::new(
+                config
+                    .get_remote_store_addrs()
+                    .iter()
+                    .map(|v| nix_utils::RemoteStore::init(v))
+                    .collect(),
+            ),
+            config,
+            args,
+            db,
+            machines: Machines::new(),
+            log_dir,
+            builds: parking_lot::RwLock::new(AHashMap::new()),
+            jobsets: parking_lot::RwLock::new(AHashMap::new()),
+            steps: parking_lot::RwLock::new(AHashMap::new()),
+            queues: tokio::sync::RwLock::new(queue::Queues::new()),
+            started_at: chrono::Utc::now(),
+            metrics: metrics::PromMetrics::new()?,
+            notify_dispatch: tokio::sync::Notify::new(),
+            uploader: uploader::Uploader::new(),
+        }))
+    }
+
+    pub fn reload_config_callback(
+        &self,
+        new_config: &crate::config::PreparedApp,
+    ) -> anyhow::Result<()> {
+        // If this gets more complex we need a way to trap the state and revert.
+        // Right now it doesn't matter, because only reconfigure_pool can fail and that is the
+        // first thing we do.
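Worth noting about the `State` struct above: it mixes two lock families. The `parking_lot` locks guard short synchronous critical sections whose guards are scoped to drop before any `.await`, while `queues` uses `tokio::sync::RwLock` because its guard is held across await points. A minimal sketch of that discipline, with hypothetical `u32` locks standing in for the real fields:

    async fn lock_discipline(
        sync_lock: &parking_lot::RwLock<u32>,
        tokio_lock: &tokio::sync::RwLock<u32>,
    ) {
        {
            // blocking guard: keep the section short and drop it before awaiting
            let n = sync_lock.read();
            let _ = *n;
        }
        // async guard: safe to hold across .await points
        let n = tokio_lock.read().await;
        let _ = *n;
    }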
+ + let curr_db_url = self.config.get_db_url(); + let curr_sort_fn = self.config.get_sort_fn(); + let curr_remote_stores = self.config.get_remote_store_addrs(); + if curr_db_url.expose_secret() != new_config.db_url.expose_secret() { + self.db + .reconfigure_pool(new_config.db_url.expose_secret())?; + } + if curr_sort_fn != new_config.machine_sort_fn { + self.machines.sort(new_config.machine_sort_fn); + } + if curr_remote_stores != new_config.remote_store_addr { + let mut remote_stores = self.remote_stores.write(); + *remote_stores = new_config + .remote_store_addr + .iter() + .map(|v| nix_utils::RemoteStore::init(v)) + .collect(); + } + Ok(()) + } + + pub fn get_nr_builds_unfinished(&self) -> usize { + self.builds.read().len() + } + + pub fn get_nr_steps_unfinished(&self) -> usize { + let mut steps = self.steps.write(); + steps.retain(|_, s| s.upgrade().is_some()); + steps.len() + } + + pub fn get_nr_runnable(&self) -> usize { + let mut steps = self.steps.write(); + steps.retain(|_, s| s.upgrade().is_some()); + steps + .iter() + .filter_map(|(_, s)| s.upgrade().map(|v| v.get_runnable())) + .filter(|v| *v) + .count() + } + + #[tracing::instrument(skip(self, machine))] + pub async fn insert_machine(&self, machine: Machine) -> uuid::Uuid { + let machine_id = self + .machines + .insert_machine(machine, self.config.get_sort_fn()); + self.trigger_dispatch(); + machine_id + } + + #[tracing::instrument(skip(self))] + pub async fn remove_machine(&self, machine_id: uuid::Uuid) { + if let Some(m) = self.machines.remove_machine(machine_id) { + let jobs = { + let jobs = m.jobs.read(); + jobs.clone() + }; + for job in &jobs { + if let Err(e) = self + .fail_step( + Some(machine_id), + &job.path, + // we fail this with preparing because we kinda want to restart all jobs if + // a machine is removed + BuildResultState::PreparingFailure, + std::time::Duration::from_secs(0), + std::time::Duration::from_secs(0), + ) + .await + { + log::error!( + "Failed to fail step machine_id={machine_id} drv={} e={e}", + job.path + ); + } + } + } + } + + pub async fn remove_all_machines(&self) { + for m in self.machines.get_all_machines() { + self.remove_machine(m.id).await; + } + } + + pub async fn clear_busy(&self) -> anyhow::Result<()> { + let mut db = self.db.get().await?; + db.clear_busy(0).await?; + Ok(()) + } + + #[tracing::instrument(skip(self, step_info, system), err)] + async fn realise_drv_on_valid_machine( + &self, + step_info: Arc, + system: &System, + ) -> anyhow::Result { + let drv = step_info.step.get_drv_path(); + let free_fn = self.config.get_free_fn(); + + let Some(machine) = self.machines.get_machine_for_system( + system, + &step_info.step.get_required_features(), + free_fn, + ) else { + log::debug!("No free machine found for system={system} drv={drv}"); + return Ok(RealiseStepResult::None); + }; + + let mut build_options = nix_utils::BuildOptions::new(None); + let build_id = { + let mut dependents = AHashSet::new(); + let mut steps = AHashSet::new(); + step_info.step.get_dependents(&mut dependents, &mut steps); + + if dependents.is_empty() { + // Apparently all builds that depend on this derivation are gone (e.g. cancelled). So + // don't bother. This is very unlikely to happen, because normally Steps are only kept + // alive by being reachable from a Build. However, it's possible that a new Build just + // created a reference to this step. So to handle that possibility, we retry this step + // (putting it back in the runnable queue). 
If there are really no strong pointers to + // the step, it will be deleted. + log::info!("maybe cancelling build step {drv}"); + return Ok(RealiseStepResult::MaybeCancelled); + } + + let Some(build) = dependents + .iter() + .find(|b| &b.drv_path == drv) + .or(dependents.iter().next()) + else { + // this should never happen, as we checked is_empty above and fallback is just any build + return Ok(RealiseStepResult::MaybeCancelled); + }; + + // We want the biggest timeout otherwise we could build a step like llvm with a timeout + // of 180 because a nixostest with a timeout got scheduled and needs this step + let biggest_max_silent_time = dependents.iter().map(|x| x.max_silent_time).max(); + let biggest_build_timeout = dependents.iter().map(|x| x.timeout).max(); + + build_options + .set_max_silent_time(biggest_max_silent_time.unwrap_or(build.max_silent_time)); + build_options.set_build_timeout(biggest_build_timeout.unwrap_or(build.timeout)); + build.id + }; + + let mut job = machine::Job::new( + build_id, + drv.to_owned(), + step_info.resolved_drv_path.clone(), + ); + job.result.start_time = Some(chrono::Utc::now()); + if self.check_cached_failure(step_info.step.clone()).await { + job.result.step_status = BuildStatus::CachedFailure; + self.inner_fail_job(drv, None, job, step_info.step.clone()) + .await?; + return Ok(RealiseStepResult::CachedFailure); + } + + self.construct_log_file_path(drv) + .await? + .to_str() + .ok_or(anyhow::anyhow!("failed to construct log path string."))? + .clone_into(&mut job.result.log_file); + let step_nr = { + let mut db = self.db.get().await?; + let mut tx = db.begin_transaction().await?; + + let step_nr = tx + .create_build_step( + job.result.start_time.map(|s| s.timestamp()), + build_id, + &step_info.step.get_drv_path().get_full_path(), + step_info.step.get_system().as_deref(), + machine.hostname.clone(), + BuildStatus::Busy, + None, + None, + step_info + .step + .get_outputs() + .unwrap_or_default() + .into_iter() + .map(|o| (o.name, o.path.map(|s| s.get_full_path()))) + .collect(), + ) + .await?; + tx.commit().await?; + step_nr + }; + job.step_nr = step_nr; + + log::info!( + "Submitting build drv={drv} on machine={} hostname={} build_id={build_id} step_nr={step_nr}", + machine.id, + machine.hostname + ); + self.db + .get() + .await? + .update_build_step(db::models::UpdateBuildStep { + build_id, + step_nr, + status: db::models::StepStatus::Connecting, + }) + .await?; + machine.build_drv(job, &build_options).await?; + self.metrics.nr_steps_started.add(1); + self.metrics.nr_steps_building.add(1); + Ok(RealiseStepResult::Valid(machine)) + } + + #[tracing::instrument(skip(self), fields(%drv), err)] + async fn construct_log_file_path( + &self, + drv: &nix_utils::StorePath, + ) -> anyhow::Result { + let mut log_file = self.log_dir.clone(); + let (dir, file) = drv.base_name().split_at(2); + log_file.push(format!("{dir}/")); + let _ = tokio::fs::create_dir_all(&log_file).await; // create dir + log_file.push(file); + Ok(log_file) + } + + #[tracing::instrument(skip(self), fields(%drv), err)] + pub async fn new_log_file( + &self, + drv: &nix_utils::StorePath, + ) -> anyhow::Result { + let log_file = self.construct_log_file_path(drv).await?; + log::debug!("opening {log_file:?}"); + + Ok(tokio::fs::File::options() + .create(true) + .truncate(true) + .write(true) + .read(false) + .mode(0o666) + .open(log_file) + .await?) 
+    }
+
+    #[tracing::instrument(skip(self, new_ids, new_builds_by_id, new_builds_by_path))]
+    async fn process_new_builds(
+        &self,
+        new_ids: Vec<BuildID>,
+        new_builds_by_id: Arc<parking_lot::RwLock<AHashMap<BuildID, Arc<Build>>>>,
+        new_builds_by_path: AHashMap<nix_utils::StorePath, AHashSet<BuildID>>,
+    ) {
+        let finished_drvs = Arc::new(parking_lot::RwLock::new(
+            AHashSet::<nix_utils::StorePath>::new(),
+        ));
+
+        let starttime = chrono::Utc::now();
+        for id in new_ids {
+            let build = {
+                let new_builds_by_id = new_builds_by_id.read();
+                let Some(build) = new_builds_by_id.get(&id).cloned() else {
+                    continue;
+                };
+                build
+            };
+
+            let new_runnable = Arc::new(parking_lot::RwLock::new(AHashSet::<Arc<Step>>::new()));
+            let nr_added: Arc<AtomicI64> = Arc::new(0.into());
+            let now = Instant::now();
+
+            self.create_build(
+                build,
+                nr_added.clone(),
+                new_builds_by_id.clone(),
+                &new_builds_by_path,
+                finished_drvs.clone(),
+                new_runnable.clone(),
+            )
+            .await;
+
+            // we should never run into this issue
+            #[allow(clippy::cast_possible_truncation)]
+            self.metrics
+                .build_read_time_ms
+                .add(now.elapsed().as_millis() as i64);
+
+            {
+                let new_runnable = new_runnable.read();
+                log::info!(
+                    "got {} new runnable steps from {} new builds",
+                    new_runnable.len(),
+                    nr_added.load(Ordering::Relaxed)
+                );
+                for r in new_runnable.iter() {
+                    r.make_runnable();
+                }
+            }
+
+            self.metrics
+                .nr_builds_read
+                .add(nr_added.load(Ordering::Relaxed));
+            let stop_queue_run_after = self.config.get_stop_queue_run_after();
+
+            if let Some(stop_queue_run_after) = stop_queue_run_after {
+                if chrono::Utc::now() > (starttime + stop_queue_run_after) {
+                    self.metrics.queue_checks_early_exits.inc();
+                    break;
+                }
+            }
+        }
+
+        {
+            // This is here to ensure that we don't keep any deps on finished steps.
+            // That can happen because step creation is async and is_new can return a step
+            // for which it is still undecided whether it is finished or not.
+            let steps = self.steps.read();
+            for (_, s) in steps.iter() {
+                let Some(s) = s.upgrade() else {
+                    continue;
+                };
+                if s.get_finished() && !s.get_previous_failure() {
+                    s.make_rdeps_runnable();
+                }
+                // TODO: on previous failure we should probably also remove it from deps
+            }
+        }
+
+        // we can just always trigger dispatch, as we might have a free machine and it's cheap
+        self.metrics.queue_checks_finished.inc();
+        self.trigger_dispatch();
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    async fn process_queue_change(&self) -> anyhow::Result<()> {
+        let mut db = self.db.get().await?;
+        let curr_ids = db
+            .get_not_finished_builds_fast()
+            .await?
+ .into_iter() + .map(|b| (b.id, b.globalpriority)) + .collect::>(); + + { + let mut builds = self.builds.write(); + builds.retain(|k, _| curr_ids.contains_key(k)); + for (id, build) in builds.iter() { + let Some(new_priority) = curr_ids.get(id) else { + // we should never get into this case because of the retain above + continue; + }; + + if build.global_priority.load(Ordering::Relaxed) < *new_priority { + log::info!("priority of build {id} increased"); + build + .global_priority + .store(*new_priority, Ordering::Relaxed); + build.propagate_priorities(); + } + } + } + + let queues = self.queues.read().await; + let cancelled_steps = queues.kill_active_steps().await; + for (drv_path, machine_id) in cancelled_steps { + if let Err(e) = self + .fail_step( + Some(machine_id), + &drv_path, + BuildResultState::Cancelled, + std::time::Duration::from_secs(0), + std::time::Duration::from_secs(0), + ) + .await + { + log::error!("Failed to abort step machine_id={machine_id} drv={drv_path} e={e}",); + } + } + Ok(()) + } + + #[tracing::instrument(skip(self), fields(%drv_path))] + pub async fn queue_one_build( + &self, + jobset_id: i32, + drv_path: &nix_utils::StorePath, + ) -> anyhow::Result<()> { + let mut db = self.db.get().await?; + let drv = nix_utils::query_drv(drv_path) + .await? + .ok_or(anyhow::anyhow!("drv not found"))?; + db.insert_debug_build(jobset_id, &drv_path.get_full_path(), &drv.system) + .await?; + + let mut tx = db.begin_transaction().await?; + tx.notify_builds_added().await?; + tx.commit().await?; + Ok(()) + } + + #[tracing::instrument(skip(self), err)] + pub async fn get_queued_builds(&self) -> anyhow::Result<()> { + self.metrics.queue_checks_started.inc(); + + let mut new_ids = Vec::::new(); + let mut new_builds_by_id = AHashMap::>::new(); + let mut new_builds_by_path = AHashMap::>::default(); + + { + let mut conn = self.db.get().await?; + for b in conn.get_not_finished_builds().await? { + let jobset = self + .create_jobset(&mut conn, b.jobset_id, &b.project, &b.jobset) + .await?; + let build = Build::new(b, jobset)?; + new_ids.push(build.id); + new_builds_by_id.insert(build.id, build.clone()); + new_builds_by_path + .entry(build.drv_path.clone()) + .or_insert_with(AHashSet::new) + .insert(build.id); + } + } + log::debug!("new_ids: {new_ids:?}"); + log::debug!("new_builds_by_id: {new_builds_by_id:?}"); + log::debug!("new_builds_by_path: {new_builds_by_path:?}"); + + let new_builds_by_id = Arc::new(parking_lot::RwLock::new(new_builds_by_id)); + self.process_new_builds(new_ids, new_builds_by_id, new_builds_by_path) + .await; + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn start_queue_monitor_loop(self: Arc) -> tokio::task::AbortHandle { + let task = tokio::task::spawn({ + async move { + if let Err(e) = self.queue_monitor_loop().await { + log::error!("Failed to spawn queue monitor loop. 
e={e}"); + } + } + }); + task.abort_handle() + } + + #[tracing::instrument(skip(self), err)] + async fn queue_monitor_loop(&self) -> anyhow::Result<()> { + let mut listener = self + .db + .listener(vec![ + "builds_added", + "builds_restarted", + "builds_cancelled", + "builds_deleted", + "builds_bumped", + "jobset_shares_changed", + ]) + .await?; + + loop { + let before_work = Instant::now(); + self.store.clear_path_info_cache(); + if let Err(e) = self.get_queued_builds().await { + log::error!("get_queue_builds failed inside queue monitor loop: {e}"); + continue; + } + + #[allow(clippy::cast_possible_truncation)] + self.metrics + .queue_monitor_time_spent_running + .inc_by(before_work.elapsed().as_micros() as u64); + + let before_sleep = Instant::now(); + let queue_trigger_timer = self.config.get_queue_trigger_timer(); + let notification = if let Some(timer) = queue_trigger_timer { + tokio::select! { + () = tokio::time::sleep(timer) => {"timer_reached".into()}, + v = listener.try_next() => match v { + Ok(Some(v)) => v.channel().to_owned(), + Ok(None) => continue, + Err(e) => { + log::warn!("PgListener failed with e={e}"); + continue; + } + }, + } + } else { + match listener.try_next().await { + Ok(Some(v)) => v.channel().to_owned(), + Ok(None) => continue, + Err(e) => { + log::warn!("PgListener failed with e={e}"); + continue; + } + } + }; + self.metrics.nr_queue_wakeups.add(1); + log::trace!("New notification from PgListener. notification={notification:?}"); + + match notification.as_ref() { + "builds_added" => log::debug!("got notification: new builds added to the queue"), + "builds_restarted" => log::debug!("got notification: builds restarted"), + "builds_cancelled" | "builds_deleted" | "builds_bumped" => { + log::debug!("got notification: builds cancelled or bumped"); + if let Err(e) = self.process_queue_change().await { + log::error!("Failed to process queue change. e={e}"); + } + } + "jobset_shares_changed" => { + log::debug!("got notification: jobset shares changed"); + if let Err(e) = self.handle_jobset_change().await { + log::error!("Failed to handle jobset change. e={e}"); + } + } + _ => (), + } + + #[allow(clippy::cast_possible_truncation)] + self.metrics + .queue_monitor_time_spent_waiting + .inc_by(before_sleep.elapsed().as_micros() as u64); + } + } + + #[tracing::instrument(skip(self))] + pub fn start_dispatch_loop(self: Arc) -> tokio::task::AbortHandle { + let task = tokio::task::spawn({ + async move { + loop { + let before_sleep = Instant::now(); + let dispatch_trigger_timer = self.config.get_dispatch_trigger_timer(); + if let Some(timer) = dispatch_trigger_timer { + tokio::select! 
{ + () = self.notify_dispatch.notified() => {}, + () = tokio::time::sleep(timer) => {}, + }; + } else { + self.notify_dispatch.notified().await; + } + log::info!("starting dispatch"); + + #[allow(clippy::cast_possible_truncation)] + self.metrics + .dispatcher_time_spent_waiting + .inc_by(before_sleep.elapsed().as_micros() as u64); + + self.metrics.nr_dispatcher_wakeups.add(1); + let before_work = Instant::now(); + self.do_dispatch_once().await; + + let elapsed = before_work.elapsed(); + + #[allow(clippy::cast_possible_truncation)] + self.metrics + .dispatcher_time_spent_running + .inc_by(elapsed.as_micros() as u64); + + #[allow(clippy::cast_possible_truncation)] + self.metrics + .dispatch_time_ms + .add(elapsed.as_millis() as i64); + } + } + }); + task.abort_handle() + } + + #[tracing::instrument(skip(self), err)] + async fn dump_status_loop(self: Arc) -> anyhow::Result<()> { + let mut listener = self.db.listener(vec!["dump_status"]).await?; + + let state = self.clone(); + loop { + let _ = match listener.try_next().await { + Ok(Some(v)) => v, + Ok(None) => continue, + Err(e) => { + log::warn!("PgListener failed with e={e}"); + continue; + } + }; + + let state = state.clone(); + let queue_stats = crate::io::QueueRunnerStats::new(state.clone()).await; + let sort_fn = state.config.get_sort_fn(); + let free_fn = state.config.get_free_fn(); + let machines = state + .machines + .get_all_machines() + .into_iter() + .map(|m| { + ( + m.hostname.clone(), + crate::io::Machine::from_state(&m, sort_fn, free_fn), + ) + }) + .collect(); + let jobsets = { + let jobsets = state.jobsets.read(); + jobsets + .values() + .map(|v| (v.full_name(), v.clone().into())) + .collect() + }; + let remote_stores = { + let stores = state.remote_stores.read(); + stores.clone() + }; + let dump_status = crate::io::DumpResponse::new( + queue_stats, + machines, + jobsets, + &state.store, + &remote_stores, + ); + { + let Ok(mut db) = self.db.get().await else { + continue; + }; + let Ok(mut tx) = db.begin_transaction().await else { + continue; + }; + let dump_status = match serde_json::to_value(dump_status) { + Ok(v) => v, + Err(e) => { + log::error!("Failed to update status in database: {e}"); + continue; + } + }; + if let Err(e) = tx.upsert_status(&dump_status).await { + log::error!("Failed to update status in database: {e}"); + continue; + } + if let Err(e) = tx.notify_status_dumped().await { + log::error!("Failed to update status in database: {e}"); + continue; + } + if let Err(e) = tx.commit().await { + log::error!("Failed to update status in database: {e}"); + } + } + } + } + + #[tracing::instrument(skip(self))] + pub fn start_dump_status_loop(self: Arc) -> tokio::task::AbortHandle { + let task = tokio::task::spawn({ + async move { + if let Err(e) = self.dump_status_loop().await { + log::error!("Failed to spawn queue monitor loop. 
e={e}"); + } + } + }); + task.abort_handle() + } + + #[tracing::instrument(skip(self))] + pub fn start_uploader_queue(self: Arc) -> tokio::task::AbortHandle { + let task = tokio::task::spawn({ + async move { + loop { + let local_store = self.store.clone(); + let remote_stores = { + let r = self.remote_stores.read(); + r.clone() + }; + let limit = self.config.get_concurrent_upload_limit(); + if limit < 2 { + self.uploader.upload_once(local_store, remote_stores).await; + } else { + self.uploader + .upload_many(local_store, remote_stores, limit) + .await; + } + } + } + }); + task.abort_handle() + } + + #[tracing::instrument(skip(self))] + pub async fn get_status_from_main_process(self: Arc) -> anyhow::Result<()> { + let mut db = self.db.get().await?; + + let mut listener = self.db.listener(vec!["status_dumped"]).await?; + { + let mut tx = db.begin_transaction().await?; + tx.notify_dump_status().await?; + tx.commit().await?; + } + + let _ = match listener.try_next().await { + Ok(Some(v)) => v, + Ok(None) => return Ok(()), + Err(e) => { + log::warn!("PgListener failed with e={e}"); + return Ok(()); + } + }; + if let Some(status) = db.get_status().await? { + println!("{}", serde_json::to_string_pretty(&status)?); + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn trigger_dispatch(&self) { + self.notify_dispatch.notify_one(); + } + + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self))] + async fn do_dispatch_once(&self) { + // Prune old historical build step info from the jobsets. + { + let jobsets = self.jobsets.read(); + for ((project_name, jobset_name), jobset) in jobsets.iter() { + let s1 = jobset.share_used(); + jobset.prune_steps(); + let s2 = jobset.share_used(); + if (s1 - s2).abs() > f64::EPSILON { + log::debug!( + "pruned scheduling window of '{project_name}:{jobset_name}' from {s1} to {s2}" + ); + } + } + } + + let mut new_runnable = Vec::new(); + { + let mut steps = self.steps.write(); + steps.retain(|_, r| { + let Some(step) = r.upgrade() else { + return false; + }; + if step.get_runnable() { + new_runnable.push(step.clone()); + } + true + }); + } + + let now = chrono::Utc::now(); + let mut new_queues = AHashMap::>::default(); + for r in new_runnable { + let Some(system) = r.get_system() else { + continue; + }; + if r.atomic_state.tries.load(Ordering::Relaxed) > 0 { + continue; + } + let step_info = StepInfo::new(&self.store, r.clone()); + + new_queues + .entry(system) + .or_insert_with(Vec::new) + .push(step_info); + } + + { + let mut queues = self.queues.write().await; + for (system, jobs) in new_queues { + queues.insert_new_jobs(system, jobs, &now); + } + queues.remove_all_weak_pointer(); + } + + { + let mut nr_steps_waiting_all_queues = 0; + let inner_queues = { + // We clone the inner queues here to unlock it again fast for other jobs + let queues = self.queues.read().await; + queues.clone_inner() + }; + let sort_fn = self.config.get_sort_fn(); + for (system, queue) in inner_queues { + let mut nr_disabled = 0; + let mut nr_waiting = 0; + for job in queue.clone_inner() { + let Some(job) = job.upgrade() else { + continue; + }; + if job.get_already_scheduled() { + log::debug!( + "Can't schedule job because job is already scheduled system={system} drv={}", + job.step.get_drv_path() + ); + continue; + } + if job.step.get_finished() { + log::debug!( + "Can't schedule job because job is already finished system={system} drv={}", + job.step.get_drv_path() + ); + continue; + } + { + let after = job.step.get_after(); + if after > now { + nr_disabled += 1; + 
log::debug!( + "Can't schedule job because job is not yet ready system={system} drv={} after={after}", + job.step.get_drv_path(), + ); + continue; + } + } + + match self + .realise_drv_on_valid_machine(job.clone(), &system) + .await + { + Ok(RealiseStepResult::Valid(m)) => { + let queues = self.queues.read().await; + queues.add_job_to_scheduled(&job, &queue, m); + // if we sort after each successful schedule we basically get a least + // current builds as tie breaker, if we have the same score. + self.machines.sort(sort_fn); + } + Ok(RealiseStepResult::None) => { + log::debug!( + "Waiting for job to schedule because no builder is ready system={system} drv={}", + job.step.get_drv_path(), + ); + nr_waiting += 1; + nr_steps_waiting_all_queues += 1; + } + Ok( + RealiseStepResult::MaybeCancelled | RealiseStepResult::CachedFailure, + ) => { + // If this is maybe cancelled (and the cancellation is correct) it is + // enough to remove it from jobs which will then reduce the ref count + // to 0 as it has no dependents. + // If its a cached failure we need to also remove it from jobs, we + // already wrote cached failure into the db, at this point in time + let mut queues = self.queues.write().await; + queues.remove_job(&job, &queue); + } + Err(e) => { + log::warn!( + "Failed to realise drv on valid machine, will be skipped: drv={} e={e}", + job.step.get_drv_path(), + ); + } + } + queue.set_nr_runnable_waiting(nr_waiting); + queue.set_nr_runnable_disabled(nr_disabled); + } + } + self.metrics + .nr_steps_waiting + .set(nr_steps_waiting_all_queues); + } + + self.abort_unsupported().await; + } + + #[tracing::instrument(skip(self, machine_id, step_status), fields(%drv_path), err)] + pub async fn update_build_step( + &self, + machine_id: Option, + drv_path: &nix_utils::StorePath, + step_status: db::models::StepStatus, + ) -> anyhow::Result<()> { + let build_id_and_step_nr = if let Some(machine_id) = machine_id { + if let Some(m) = self.machines.get_machine_by_id(machine_id) { + log::debug!("get job from machine: drv_path={drv_path} m={}", m.id); + m.get_build_id_and_step_nr(drv_path) + } else { + None + } + } else { + None + }; + + let Some((build_id, step_nr)) = build_id_and_step_nr else { + log::warn!( + "Failed to find job with build_id and step_nr for machine_id={machine_id:?} drv_path={drv_path}." + ); + return Ok(()); + }; + self.db + .get() + .await? + .update_build_step(db::models::UpdateBuildStep { + build_id, + step_nr, + status: step_status, + }) + .await?; + Ok(()) + } + + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self, output), fields(%drv_path), err)] + pub async fn succeed_step( + &self, + machine_id: Option, + drv_path: &nix_utils::StorePath, + output: BuildOutput, + ) -> anyhow::Result<()> { + log::info!("marking job as done: drv_path={drv_path}"); + let (step_info, queue, machine) = { + let queues = self.queues.read().await; + queues + .remove_job_from_scheduled(drv_path) + .ok_or(anyhow::anyhow!("Step is missing in queues.scheduled"))? 
+ }; + + step_info.step.set_finished(true); + self.metrics.nr_steps_done.add(1); + self.metrics.nr_steps_building.sub(1); + + log::debug!( + "removing job from machine: drv_path={drv_path} m={}", + machine.id + ); + let mut job = machine.remove_job(drv_path).ok_or(anyhow::anyhow!( + "Job is missing in machine.jobs m={}", + machine + ))?; + + { + let mut queues = self.queues.write().await; + queues.remove_job(&step_info, &queue); + } + + job.result.step_status = BuildStatus::Success; + job.result.stop_time = Some(chrono::Utc::now()); + { + let total_step_time = job.result.get_total_step_time_ms(); + machine + .stats + .add_to_total_step_time_ms(u128::from(total_step_time)); + machine + .stats + .add_to_total_step_import_time_ms(output.import_elapsed.as_millis()); + machine + .stats + .add_to_total_step_build_time_ms(output.build_elapsed.as_millis()); + machine.stats.reset_consecutive_failures(); + self.metrics + .add_to_total_step_time_ms(u128::from(total_step_time)); + self.metrics + .add_to_total_step_import_time_ms(output.import_elapsed.as_millis()); + self.metrics + .add_to_total_step_build_time_ms(output.build_elapsed.as_millis()); + } + + { + let mut db = self.db.get().await?; + let mut tx = db.begin_transaction().await?; + finish_build_step( + &mut tx, + job.build_id, + job.step_nr, + &job.result, + Some(machine.hostname.clone()), + ) + .await?; + tx.commit().await?; + } + + // TODO: can retry: builder.cc:260 + + for (_, path) in &output.outputs { + self.add_root(path); + } + + let has_stores = { + let r = self.remote_stores.read(); + !r.is_empty() + }; + if has_stores { + let outputs = output + .outputs + .values() + .map(Clone::clone) + .collect::>(); + + let _ = self.uploader.schedule_upload( + outputs, + format!("log/{}", job.path.base_name()), + // TODO: handle compression + job.result.log_file, + ); + } + + let mut direct = Vec::new(); + { + let state = step_info.step.state.read(); + for b in &state.builds { + let Some(b) = b.upgrade() else { + continue; + }; + if !b.get_finished_in_db() { + direct.push(b); + } + } + + if direct.is_empty() { + let mut steps = self.steps.write(); + steps.retain(|s, _| s != step_info.step.get_drv_path()); + } + } + + { + let mut db = self.db.get().await?; + let mut tx = db.begin_transaction().await?; + for b in &direct { + let is_cached = job.build_id != b.id || job.result.is_cached; + tx.mark_succeeded_build( + get_mark_build_sccuess_data(b, &output), + is_cached, + i32::try_from( + job.result + .start_time + .map(|s| s.timestamp()) + .unwrap_or_default(), + )?, // TODO + i32::try_from( + job.result + .stop_time + .map(|s| s.timestamp()) + .unwrap_or_default(), + )?, // TODO + ) + .await?; + self.metrics.nr_builds_done.add(1); + } + + tx.commit().await?; + } + + { + // Remove the direct dependencies from 'builds'. This will cause them to be + // destroyed. 
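The comment above is load-bearing: `builds` holds the last strong `Arc<Build>`, while `steps` only keeps `Weak` pointers, so removing the map entry is what actually frees the value and makes later `upgrade()` calls fail. A self-contained sketch of that mechanism (a plain `String` stands in for the real types):

    use std::sync::{Arc, Weak};

    fn weak_prune_sketch() {
        let strong: Arc<String> = Arc::new("build".to_owned());
        let weak: Weak<String> = Arc::downgrade(&strong);
        assert!(weak.upgrade().is_some()); // alive while the map holds the Arc
        drop(strong); // what `current_builds.remove(&b.id)` amounts to
        assert!(weak.upgrade().is_none()); // pruned later via `retain(|_, s| s.upgrade().is_some())`
    }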
+ let mut current_builds = self.builds.write(); + for b in &direct { + b.set_finished_in_db(true); + current_builds.remove(&b.id); + } + } + + { + let mut db = self.db.get().await?; + let mut tx = db.begin_transaction().await?; + for b in direct { + tx.notify_build_finished(b.id, &[]).await?; + } + + tx.commit().await?; + } + + step_info.step.make_rdeps_runnable(); + + // always trigger dispatch, as we now might have a free machine again + self.trigger_dispatch(); + + Ok(()) + } + + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self), fields(%drv_path), err)] + pub async fn fail_step( + &self, + machine_id: Option, + drv_path: &nix_utils::StorePath, + state: BuildResultState, + import_elapsed: std::time::Duration, + build_elapsed: std::time::Duration, + ) -> anyhow::Result<()> { + log::info!("removing job from running in system queue: drv_path={drv_path}"); + let (step_info, queue, machine) = { + let queues = self.queues.read().await; + queues + .remove_job_from_scheduled(drv_path) + .ok_or(anyhow::anyhow!("Step is missing in queues.scheduled"))? + }; + + step_info.step.set_finished(false); + self.metrics.nr_steps_done.add(1); + self.metrics.nr_steps_building.sub(1); + + log::debug!( + "removing job from machine: drv_path={drv_path} m={}", + machine.id + ); + let mut job = machine.remove_job(drv_path).ok_or(anyhow::anyhow!( + "Job is missing in machine.jobs m={}", + machine + ))?; + + job.result.step_status = BuildStatus::Failed; + // this can override step_status to something more specific + job.result.update_with_result_state(&state); + + // TODO: max failure count + let (max_retries, retry_interval, retry_backoff) = self.config.get_retry(); + + if job.result.can_retry { + step_info + .step + .atomic_state + .tries + .fetch_add(1, Ordering::Relaxed); + let tries = step_info.step.atomic_state.tries.load(Ordering::Relaxed); + if tries < max_retries { + // retry step + // TODO: update metrics: + // - build_step_time_ms, + // - total_step_time_ms, + // - maschine.build_step_time_ms, + // - maschine.total_step_time_ms, + // - maschine.last_failure + self.metrics.nr_retries.add(1); + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::cast_possible_truncation)] + let delta = (retry_interval * retry_backoff.powf((tries - 1) as f32)) as i64; + log::info!("will retry '{drv_path}' after {delta}s"); + step_info + .step + .set_after(chrono::Utc::now() + chrono::Duration::seconds(delta)); + if i64::from(tries) > self.metrics.max_nr_retries.get() { + self.metrics.max_nr_retries.set(i64::from(tries)); + } + + step_info.set_already_scheduled(false); + + { + let mut db = self.db.get().await?; + let mut tx = db.begin_transaction().await?; + finish_build_step( + &mut tx, + job.build_id, + job.step_nr, + &job.result, + Some(machine.hostname.clone()), + ) + .await?; + tx.commit().await?; + } + self.trigger_dispatch(); + return Ok(()); + } + } + + // remove job from queues, aka actually fail the job + { + let mut queues = self.queues.write().await; + queues.remove_job(&step_info, &queue); + } + + machine + .stats + .add_to_total_step_build_time_ms(build_elapsed.as_millis()); + machine + .stats + .add_to_total_step_import_time_ms(import_elapsed.as_millis()); + self.metrics + .add_to_total_step_build_time_ms(build_elapsed.as_millis()); + self.metrics + .add_to_total_step_import_time_ms(import_elapsed.as_millis()); + + self.inner_fail_job(drv_path, Some(machine), job, step_info.step.clone()) + .await + } + + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip(self, machine, job, 
step), fields(%drv_path), err)] + async fn inner_fail_job( + &self, + drv_path: &nix_utils::StorePath, + machine: Option>, + mut job: machine::Job, + step: Arc, + ) -> anyhow::Result<()> { + job.result.stop_time = Some(chrono::Utc::now()); + { + let total_step_time = job.result.get_total_step_time_ms(); + self.metrics + .add_to_total_step_time_ms(u128::from(total_step_time)); + if let Some(machine) = &machine { + machine + .stats + .add_to_total_step_time_ms(u128::from(total_step_time)); + machine.stats.store_last_failure_now(); + } + } + + { + let mut db = self.db.get().await?; + let mut tx = db.begin_transaction().await?; + finish_build_step( + &mut tx, + job.build_id, + job.step_nr, + &job.result, + machine.as_ref().map(|m| m.hostname.clone()), + ) + .await?; + tx.commit().await?; + } + + // TODO: builder:415 + let mut dependent_ids = Vec::new(); + loop { + let indirect = self.get_all_indirect_builds(&step); + // TODO: stepFinished ? + if indirect.is_empty() { + break; + } + + // Create failed build steps for every build that depends on this, except when this + // step is cached and is the top-level of that build (since then it's redundant with + // the build's isCachedBuild field). + { + let mut db = self.db.get().await?; + let mut tx = db.begin_transaction().await?; + for b in &indirect { + if (job.result.step_status == BuildStatus::CachedFailure + && &b.drv_path == step.get_drv_path()) + || ((job.result.step_status != BuildStatus::CachedFailure + && job.result.step_status != BuildStatus::Unsupported) + && job.build_id == b.id) + || b.get_finished_in_db() + { + continue; + } + + tx.create_build_step( + None, + b.id, + &step.get_drv_path().get_full_path(), + step.get_system().as_deref(), + machine + .as_deref() + .map(|m| m.hostname.clone()) + .unwrap_or_default(), + job.result.step_status, + job.result.error_msg.clone(), + if job.build_id == b.id { + None + } else { + Some(job.build_id) + }, + step.get_outputs() + .unwrap_or_default() + .into_iter() + .map(|o| (o.name, o.path.map(|s| s.get_full_path()))) + .collect(), + ) + .await?; + } + + // Mark all builds that depend on this derivation as failed. + for b in &indirect { + if b.get_finished_in_db() { + continue; + } + + log::info!("marking build {} as failed", b.id); + tx.update_build_after_failure( + b.id, + if &b.drv_path != step.get_drv_path() + && job.result.step_status == BuildStatus::Failed + { + BuildStatus::DepFailed + } else { + job.result.step_status + }, + i32::try_from( + job.result + .start_time + .map(|s| s.timestamp()) + .unwrap_or_default(), + )?, // TODO + i32::try_from( + job.result + .stop_time + .map(|s| s.timestamp()) + .unwrap_or_default(), + )?, // TODO + job.result.step_status == BuildStatus::CachedFailure, + ) + .await?; + self.metrics.nr_builds_done.add(1); + } + + // Remember failed paths in the database so that they won't be built again. + if job.result.step_status == BuildStatus::CachedFailure && job.result.can_cache { + for o in step.get_outputs().unwrap_or_default() { + let Some(p) = o.path else { continue }; + tx.insert_failed_paths(&p.get_full_path()).await?; + } + } + + tx.commit().await?; + } + + { + // Remove the indirect dependencies from 'builds'. This will cause them to be + // destroyed. 
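For reference, the retry delay computed in `fail_step` above grows geometrically with the try count. A sketch of that formula with made-up values; the real `retry_interval` and `retry_backoff` come from `self.config.get_retry()`:

    fn retry_delay_secs(retry_interval: f32, retry_backoff: f32, tries: u32) -> i64 {
        // first retry waits retry_interval, then interval * backoff, then * backoff^2, ...
        (retry_interval * retry_backoff.powf((tries - 1) as f32)) as i64
    }

    // e.g. retry_interval = 60.0, retry_backoff = 2.0 -> 60s, 120s, 240s, ...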
+                let mut current_builds = self.builds.write();
+                for b in indirect {
+                    b.set_finished_in_db(true);
+                    current_builds.remove(&b.id);
+                    dependent_ids.push(b.id);
+                }
+            }
+        }
+        {
+            let mut db = self.db.get().await?;
+            let mut tx = db.begin_transaction().await?;
+            tx.notify_build_finished(job.build_id, &dependent_ids)
+                .await?;
+            tx.commit().await?;
+        }
+
+        // trigger dispatch, as we now have a free machine again
+        self.trigger_dispatch();
+
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, step))]
+    fn get_all_indirect_builds(&self, step: &Arc<Step>) -> AHashSet<Arc<Build>> {
+        let mut indirect = AHashSet::new();
+        let mut steps = AHashSet::new();
+        step.get_dependents(&mut indirect, &mut steps);
+
+        // If there are no builds left, delete all referring
+        // steps from ‘steps’. As for the success case, we can
+        // be certain no new referrers can be added.
+        if indirect.is_empty() {
+            let mut current_steps_map = self.steps.write();
+            for s in steps {
+                let drv = s.get_drv_path();
+                log::debug!("finishing build step '{drv}'");
+                current_steps_map.retain(|path, _| path != drv);
+            }
+        }
+
+        indirect
+    }
+
+    #[tracing::instrument(skip(self, conn), err)]
+    async fn create_jobset(
+        &self,
+        conn: &mut db::Connection,
+        jobset_id: i32,
+        project_name: &str,
+        jobset_name: &str,
+    ) -> anyhow::Result<Arc<Jobset>> {
+        let key = (project_name.to_owned(), jobset_name.to_owned());
+        {
+            let jobsets = self.jobsets.read();
+            if let Some(jobset) = jobsets.get(&key) {
+                return Ok(jobset.clone());
+            }
+        }
+
+        let shares = conn
+            .get_jobset_scheduling_shares(jobset_id)
+            .await?
+            .ok_or(anyhow::anyhow!("Scheduling shares not found for jobset."))?;
+        let jobset = Jobset::new(jobset_id, project_name, jobset_name);
+        jobset.set_shares(shares)?;
+
+        for step in conn
+            .get_jobset_build_steps(jobset_id, SCHEDULING_WINDOW)
+            .await?
+        {
+            let Some(starttime) = step.starttime else {
+                continue;
+            };
+            let Some(stoptime) = step.stoptime else {
+                continue;
+            };
+            jobset.add_step(i64::from(starttime), i64::from(stoptime - starttime));
+        }
+
+        let jobset = Arc::new(jobset);
+        {
+            let mut jobsets = self.jobsets.write();
+            jobsets.insert(key, jobset.clone());
+        }
+
+        Ok(jobset)
+    }
+
+    #[tracing::instrument(skip(self, build, step), err)]
+    async fn handle_previous_failure(
+        &self,
+        build: Arc<Build>,
+        step: Arc<Step>,
+    ) -> anyhow::Result<()> {
+        // Some step previously failed, so mark the build as failed right away.
+        log::warn!(
+            "marking build {} as cached failure due to '{}'",
+            build.id,
+            step.get_drv_path()
+        );
+        if build.get_finished_in_db() {
+            return Ok(());
+        }
+
+        // if !build.finished_in_db
+        let mut conn = self.db.get().await?;
+        let mut tx = conn.begin_transaction().await?;
+
+        // Find the previous build step record, first by derivation path, then by output
+        // path.
+        let mut propagated_from = tx
+            .get_last_build_step_id(&step.get_drv_path().get_full_path())
+            .await?
+ .unwrap_or_default(); + + if propagated_from == 0 { + // we can access step.drv here because the value is always set if + // PreviousFailure is returned, so this should never yield None + + let outputs = step.get_outputs().unwrap_or_default(); + for o in outputs { + let res = if let Some(path) = &o.path { + tx.get_last_build_step_id_for_output_path(&path.get_full_path()) + .await + } else { + tx.get_last_build_step_id_for_output_with_drv( + &step.get_drv_path().get_full_path(), + &o.name, + ) + .await + }; + if let Ok(Some(res)) = res { + propagated_from = res; + break; + } + } + } + + tx.create_build_step( + None, + build.id, + &step.get_drv_path().get_full_path(), + step.get_system().as_deref(), + String::new(), + BuildStatus::CachedFailure, + None, + Some(propagated_from), + step.get_outputs() + .unwrap_or_default() + .into_iter() + .map(|o| (o.name, o.path.map(|s| s.get_full_path()))) + .collect(), + ) + .await?; + tx.update_build_after_previous_failure( + build.id, + if step.get_drv_path() == &build.drv_path { + BuildStatus::Failed + } else { + BuildStatus::DepFailed + }, + ) + .await?; + + let _ = tx.notify_build_finished(build.id, &[]).await; + tx.commit().await?; + + build.set_finished_in_db(true); + self.metrics.nr_builds_done.add(1); + Ok(()) + } + + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip( + self, + build, + nr_added, + new_builds_by_id, + new_builds_by_path, + finished_drvs, + new_runnable + ), fields(build_id=build.id))] + async fn create_build( + &self, + build: Arc, + nr_added: Arc, + new_builds_by_id: Arc>>>, + new_builds_by_path: &AHashMap>, + finished_drvs: Arc>>, + new_runnable: Arc>>>, + ) { + self.metrics.queue_build_loads.inc(); + log::info!("loading build {} ({})", build.id, build.full_job_name()); + nr_added.fetch_add(1, Ordering::Relaxed); + { + let mut new_builds_by_id = new_builds_by_id.write(); + new_builds_by_id.remove(&build.id); + } + + if !nix_utils::check_if_storepath_exists(&build.drv_path).await { + log::error!("aborting GC'ed build {}", build.id); + if !build.get_finished_in_db() { + match self.db.get().await { + Ok(mut conn) => { + if let Err(e) = conn.abort_build(build.id).await { + log::error!("Failed to abort the build={} e={}", build.id, e); + } + } + Err(e) => log::error!( + "Failed to get database connection so we can abort the build={} e={}", + build.id, + e + ), + } + } + + build.set_finished_in_db(true); + self.metrics.nr_builds_done.add(1); + return; + } + + // Create steps for this derivation and its dependencies. 
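Step creation below fans out over dependent builds and input derivations with bounded-concurrency streams (`buffered(10)` for builds, `buffered(25)` for inputs). The shape of that pattern, reduced to a self-contained sketch with a hypothetical per-item task:

    async fn bounded_fan_out(paths: Vec<String>) {
        use futures::StreamExt as _;
        let mut stream = tokio_stream::iter(paths)
            .map(|p| async move {
                // per-dependency async work (create_step in the real code)
                p.len()
            })
            .buffered(10); // at most 10 futures in flight; results arrive in input order
        while let Some(_n) = stream.next().await {}
    }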
+ let new_steps = Arc::new(parking_lot::RwLock::new(AHashSet::>::new())); + let step = match self + .create_step( + // conn, + build.clone(), + build.drv_path.clone(), + Some(build.clone()), + None, + finished_drvs.clone(), + new_steps.clone(), + new_runnable.clone(), + ) + .await + { + CreateStepResult::None => None, + CreateStepResult::Valid(dep) => Some(dep), + CreateStepResult::PreviousFailure(step) => { + if let Err(e) = self.handle_previous_failure(build, step).await { + log::error!("Failed to handle previous failure: {e}"); + } + return; + } + }; + + { + use futures::stream::StreamExt as _; + + let builds = { + let new_steps = new_steps.read(); + new_steps + .iter() + .filter_map(|r| Some(new_builds_by_path.get(r.get_drv_path())?.clone())) + .flatten() + .collect::>() + }; + let mut stream = futures::StreamExt::map(tokio_stream::iter(builds), |b| { + let nr_added = nr_added.clone(); + let new_builds_by_id = new_builds_by_id.clone(); + let finished_drvs = finished_drvs.clone(); + let new_runnable = new_runnable.clone(); + async move { + let j = { + let new_builds_by_id = new_builds_by_id.read(); + let Some(j) = new_builds_by_id.get(&b) else { + return; + }; + j.clone() + }; + + Box::pin(self.create_build( + j, + nr_added, + new_builds_by_id, + new_builds_by_path, + finished_drvs, + new_runnable, + )) + .await; + } + }) + .buffered(10); + while tokio_stream::StreamExt::next(&mut stream).await.is_some() {} + } + + if let Some(step) = step { + if !build.get_finished_in_db() { + let mut builds = self.builds.write(); + builds.insert(build.id, build.clone()); + } + + build.set_toplevel_step(step.clone()); + build.propagate_priorities(); + + let new_steps = new_steps.read(); + log::info!( + "added build {} (top-level step {}, {} new steps)", + build.id, + step.get_drv_path(), + new_steps.len() + ); + } else { + // If we didn't get a step, it means the step's outputs are + // all valid. So we mark this as a finished, cached build. 
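`create_step` drives a three-way branch here: no step means every output is already valid (a cached build), a step means real work to schedule, and a previous failure short-circuits the build. Schematically, with strings standing in for the real `Build`/`Step` types:

    enum Outcome {
        AllOutputsValid,         // mark the build as finished/cached
        NeedsBuild(String),      // top-level step to schedule
        PreviousFailure(String), // propagate the cached failure
    }

    fn handle(outcome: Outcome) {
        match outcome {
            Outcome::AllOutputsValid => println!("cached build"),
            Outcome::NeedsBuild(drv) => println!("schedule {drv}"),
            Outcome::PreviousFailure(drv) => println!("fail due to {drv}"),
        }
    }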
+ if let Err(e) = self.handle_cached_build(build).await { + log::error!("failed to handle cached build: {e}"); + } + } + } + + #[allow(clippy::too_many_arguments)] + #[allow(clippy::too_many_lines)] + #[tracing::instrument(skip( + self, + build, + referring_build, + referring_step, + finished_drvs, + new_steps, + new_runnable + ), fields(build_id=build.id, %drv_path))] + async fn create_step( + &self, + build: Arc, + drv_path: nix_utils::StorePath, + referring_build: Option>, + referring_step: Option>, + finished_drvs: Arc>>, + new_steps: Arc>>>, + new_runnable: Arc>>>, + ) -> CreateStepResult { + use futures::stream::StreamExt as _; + + { + let finished_drvs = finished_drvs.read(); + if finished_drvs.contains(&drv_path) { + return CreateStepResult::None; + } + } + + let mut is_new = false; + let step = { + let mut steps = self.steps.write(); + let step = if let Some(step) = steps.get(&drv_path) { + if let Some(step) = step.upgrade() { + step + } else { + steps.remove(&drv_path); + is_new = true; + Step::new(drv_path.clone()) + } + } else { + is_new = true; + Step::new(drv_path.clone()) + }; + + { + let mut state = step.state.write(); + if let Some(referring_build) = referring_build { + state.builds.push(Arc::downgrade(&referring_build)); + } + if let Some(referring_step) = referring_step { + state.rdeps.push(Arc::downgrade(&referring_step)); + } + } + + steps.insert(drv_path.clone(), Arc::downgrade(&step)); + step + }; + + if !is_new { + return CreateStepResult::Valid(step); + } + self.metrics.queue_steps_created.inc(); + log::debug!("considering derivation '{drv_path}'"); + + let Some(drv) = nix_utils::query_drv(&drv_path).await.ok().flatten() else { + return CreateStepResult::None; + }; + + let use_substitutes = self.config.get_use_substitutes(); + // TODO: check all remote stores + let remote_store = { + let r = self.remote_stores.read(); + r.first().cloned() + }; + let missing_outputs = if let Some(ref remote_store) = remote_store { + let mut missing = remote_store + .query_missing_remote_outputs(drv.outputs.clone()) + .await; + if !missing.is_empty() + && nix_utils::query_missing_outputs(drv.outputs.clone()) + .await + .is_empty() + { + // we have all paths locally, so we can just upload them to the remote_store + if let Ok(log_file) = self.construct_log_file_path(&drv_path).await { + let _ = self.uploader.schedule_upload( + missing.into_iter().filter_map(|v| v.path).collect(), + format!("log/{}", drv_path.base_name()), + log_file.to_string_lossy().to_string(), + ); + missing = vec![]; + } + } + + missing + } else { + nix_utils::query_missing_outputs(drv.outputs.clone()).await + }; + + step.set_drv(drv); + + if self.check_cached_failure(step.clone()).await { + step.set_previous_failure(true); + return CreateStepResult::PreviousFailure(step); + } + + log::debug!("missing outputs: {missing_outputs:?}"); + let mut finished = missing_outputs.is_empty(); + if !missing_outputs.is_empty() && use_substitutes { + use futures::stream::StreamExt as _; + + let mut substituted = 0; + let missing_outputs_len = missing_outputs.len(); + let build_opts = nix_utils::BuildOptions::substitute_only(); + + let mut stream = futures::StreamExt::map(tokio_stream::iter(missing_outputs), |o| { + self.metrics.nr_substitutes_started.inc(); + crate::utils::substitute_output( + self.db.clone(), + self.store.clone(), + o, + build.id, + &drv_path, + &build_opts, + remote_store.as_ref(), + ) + }) + .buffer_unordered(10); + while let Some(v) = tokio_stream::StreamExt::next(&mut stream).await { + match v { + Ok(()) => 
+    #[allow(clippy::too_many_arguments)]
+    #[allow(clippy::too_many_lines)]
+    #[tracing::instrument(skip(
+        self,
+        build,
+        referring_build,
+        referring_step,
+        finished_drvs,
+        new_steps,
+        new_runnable
+    ), fields(build_id=build.id, %drv_path))]
+    async fn create_step(
+        &self,
+        build: Arc<Build>,
+        drv_path: nix_utils::StorePath,
+        referring_build: Option<Arc<Build>>,
+        referring_step: Option<Arc<Step>>,
+        finished_drvs: Arc<parking_lot::RwLock<AHashSet<nix_utils::StorePath>>>,
+        new_steps: Arc<parking_lot::RwLock<AHashSet<Arc<Step>>>>,
+        new_runnable: Arc<parking_lot::RwLock<AHashSet<Arc<Step>>>>,
+    ) -> CreateStepResult {
+        use futures::stream::StreamExt as _;
+
+        {
+            let finished_drvs = finished_drvs.read();
+            if finished_drvs.contains(&drv_path) {
+                return CreateStepResult::None;
+            }
+        }
+
+        let mut is_new = false;
+        let step = {
+            let mut steps = self.steps.write();
+            let step = if let Some(step) = steps.get(&drv_path) {
+                if let Some(step) = step.upgrade() {
+                    step
+                } else {
+                    steps.remove(&drv_path);
+                    is_new = true;
+                    Step::new(drv_path.clone())
+                }
+            } else {
+                is_new = true;
+                Step::new(drv_path.clone())
+            };
+
+            {
+                let mut state = step.state.write();
+                if let Some(referring_build) = referring_build {
+                    state.builds.push(Arc::downgrade(&referring_build));
+                }
+                if let Some(referring_step) = referring_step {
+                    state.rdeps.push(Arc::downgrade(&referring_step));
+                }
+            }
+
+            steps.insert(drv_path.clone(), Arc::downgrade(&step));
+            step
+        };
+
+        if !is_new {
+            return CreateStepResult::Valid(step);
+        }
+        self.metrics.queue_steps_created.inc();
+        log::debug!("considering derivation '{drv_path}'");
+
+        let Some(drv) = nix_utils::query_drv(&drv_path).await.ok().flatten() else {
+            return CreateStepResult::None;
+        };
+
+        let use_substitutes = self.config.get_use_substitutes();
+        // TODO: check all remote stores
+        let remote_store = {
+            let r = self.remote_stores.read();
+            r.first().cloned()
+        };
+        let missing_outputs = if let Some(ref remote_store) = remote_store {
+            let mut missing = remote_store
+                .query_missing_remote_outputs(drv.outputs.clone())
+                .await;
+            if !missing.is_empty()
+                && nix_utils::query_missing_outputs(drv.outputs.clone())
+                    .await
+                    .is_empty()
+            {
+                // We have all paths locally, so we can just upload them to the
+                // remote store instead of substituting or building them.
+                if let Ok(log_file) = self.construct_log_file_path(&drv_path).await {
+                    let _ = self.uploader.schedule_upload(
+                        missing.into_iter().filter_map(|v| v.path).collect(),
+                        format!("log/{}", drv_path.base_name()),
+                        log_file.to_string_lossy().to_string(),
+                    );
+                    missing = vec![];
+                }
+            }
+
+            missing
+        } else {
+            nix_utils::query_missing_outputs(drv.outputs.clone()).await
+        };
+
+        step.set_drv(drv);
+
+        if self.check_cached_failure(step.clone()).await {
+            step.set_previous_failure(true);
+            return CreateStepResult::PreviousFailure(step);
+        }
+
+        log::debug!("missing outputs: {missing_outputs:?}");
+        let mut finished = missing_outputs.is_empty();
+        if !missing_outputs.is_empty() && use_substitutes {
+            use futures::stream::StreamExt as _;
+
+            let mut substituted = 0;
+            let missing_outputs_len = missing_outputs.len();
+            let build_opts = nix_utils::BuildOptions::substitute_only();
+
+            let mut stream = futures::StreamExt::map(tokio_stream::iter(missing_outputs), |o| {
+                self.metrics.nr_substitutes_started.inc();
+                crate::utils::substitute_output(
+                    self.db.clone(),
+                    self.store.clone(),
+                    o,
+                    build.id,
+                    &drv_path,
+                    &build_opts,
+                    remote_store.as_ref(),
+                )
+            })
+            .buffer_unordered(10);
+            while let Some(v) = tokio_stream::StreamExt::next(&mut stream).await {
+                match v {
+                    Ok(()) => {
+                        self.metrics.nr_substitutes_succeeded.inc();
+                        substituted += 1;
+                    }
+                    Err(e) => {
+                        self.metrics.nr_substitutes_failed.inc();
+                        log::warn!("Failed to substitute path: {e}");
+                    }
+                }
+            }
+            finished = substituted == missing_outputs_len;
+        }
+
+        if finished {
+            let mut finished_drvs = finished_drvs.write();
+            finished_drvs.insert(drv_path.clone());
+            step.set_finished(true);
+            return CreateStepResult::None;
+        }
+
+        log::debug!("creating build step '{drv_path}'");
+        let Some(input_drvs) = step.get_input_drvs() else {
+            // This should never happen, because we always have a drv set at
+            // this point in time.
+            return CreateStepResult::None;
+        };
+
+        let step2 = step.clone();
+        let mut stream = futures::StreamExt::map(tokio_stream::iter(input_drvs), |i| {
+            let build = build.clone();
+            let step = step2.clone();
+            let finished_drvs = finished_drvs.clone();
+            let new_steps = new_steps.clone();
+            let new_runnable = new_runnable.clone();
+            async move {
+                let path = nix_utils::StorePath::new(&i);
+                Box::pin(self.create_step(
+                    build,
+                    path,
+                    None,
+                    Some(step),
+                    finished_drvs,
+                    new_steps,
+                    new_runnable,
+                ))
+                .await
+            }
+        })
+        .buffered(25);
+        while let Some(v) = tokio_stream::StreamExt::next(&mut stream).await {
+            match v {
+                CreateStepResult::None => (),
+                CreateStepResult::Valid(dep) => {
+                    if !dep.get_finished() && !dep.get_previous_failure() {
+                        // `finished` can be true if the returned step already
+                        // exists in self.steps and is currently being processed
+                        // for completion.
+                        let mut state = step.state.write();
+                        state.deps.insert(dep);
+                    }
+                }
+                CreateStepResult::PreviousFailure(step) => {
+                    return CreateStepResult::PreviousFailure(step);
+                }
+            }
+        }
+
+        {
+            step.atomic_state.set_created(true);
+            if step.get_deps_size() == 0 {
+                let mut new_runnable = new_runnable.write();
+                new_runnable.insert(step.clone());
+            }
+        }
+
+        {
+            let mut new_steps = new_steps.write();
+            new_steps.insert(step.clone());
+        }
+        CreateStepResult::Valid(step)
+    }
+
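+    /// Returns true if any output path of this step was previously recorded
+    /// as failed in the database, i.e. the step is a known cached failure.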
+    #[tracing::instrument(skip(self, step), ret, level = "debug")]
+    async fn check_cached_failure(&self, step: Arc<Step>) -> bool {
+        let Some(drv_outputs) = step.get_outputs() else {
+            return false;
+        };
+
+        let Ok(mut conn) = self.db.get().await else {
+            return false;
+        };
+
+        conn.check_if_paths_failed(
+            &drv_outputs
+                .iter()
+                .filter_map(|o| o.path.as_ref().map(nix_utils::StorePath::get_full_path))
+                .collect::<Vec<_>>(),
+        )
+        .await
+        .unwrap_or_default()
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    async fn handle_jobset_change(&self) -> anyhow::Result<()> {
+        let curr_jobsets_in_db = self.db.get().await?.get_jobsets().await?;
+
+        let jobsets = self.jobsets.read();
+        for row in curr_jobsets_in_db {
+            if let Some(i) = jobsets.get(&(row.project.clone(), row.name.clone())) {
+                if let Err(e) = i.set_shares(row.schedulingshares) {
+                    log::error!(
+                        "Failed to update jobset scheduling shares. project_name={} jobset_name={} e={}",
+                        row.project,
+                        row.name,
+                        e,
+                    );
+                }
+            }
+        }
+
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, build), fields(build_id=build.id), err)]
+    async fn handle_cached_build(&self, build: Arc<Build>) -> anyhow::Result<()> {
+        let res = self.get_build_output_cached(&build.drv_path).await?;
+
+        for (_, path) in &res.outputs {
+            self.add_root(path);
+        }
+
+        {
+            let mut db = self.db.get().await?;
+            let mut tx = db.begin_transaction().await?;
+
+            log::info!("marking build {} as succeeded (cached)", build.id);
+            let now = chrono::Utc::now().timestamp();
+            tx.mark_succeeded_build(
+                get_mark_build_sccuess_data(&build, &res),
+                true,
+                i32::try_from(now)?, // TODO
+                i32::try_from(now)?, // TODO
+            )
+            .await?;
+            self.metrics.nr_builds_done.add(1);
+
+            tx.notify_build_finished(build.id, &[]).await?;
+            tx.commit().await?;
+        }
+        build.set_finished_in_db(true);
+
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    async fn get_build_output_cached(
+        &self,
+        drv_path: &nix_utils::StorePath,
+    ) -> anyhow::Result<BuildOutput> {
+        let drv = nix_utils::query_drv(drv_path)
+            .await?
+            .ok_or(anyhow::anyhow!("Derivation not found"))?;
+
+        {
+            let mut db = self.db.get().await?;
+            for o in &drv.outputs {
+                let Some(out_path) = &o.path else {
+                    continue;
+                };
+                let Some(db_build_output) = db
+                    .get_build_output_for_path(&out_path.get_full_path())
+                    .await?
+                else {
+                    continue;
+                };
+                let build_id = db_build_output.id;
+                let Ok(mut res): anyhow::Result<BuildOutput> = db_build_output.try_into() else {
+                    continue;
+                };
+
+                res.products = db
+                    .get_build_products_for_build_id(build_id)
+                    .await?
+                    .into_iter()
+                    .map(Into::into)
+                    .collect();
+                res.metrics = db
+                    .get_build_metrics_for_build_id(build_id)
+                    .await?
+                    .into_iter()
+                    .map(|v| (v.name.clone(), v.into()))
+                    .collect();
+
+                return Ok(res);
+            }
+        }
+
+        BuildOutput::new(&self.store, drv.outputs).await
+    }
+
+    fn add_root(&self, drv_path: &nix_utils::StorePath) {
+        let roots_dir = self.config.get_roots_dir();
+        nix_utils::add_root(&roots_dir, drv_path);
+    }
+
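+    /// Aborts runnable steps whose system type no connected machine supports.
+    /// A step is only aborted once it has been unsupported for longer than the
+    /// configured `max_unsupported_time`; until then it stays queued in case a
+    /// suitable machine (re)connects.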
+    async fn abort_unsupported(&self) {
+        let runnable = {
+            let mut steps = self.steps.write();
+            steps.retain(|_, s| s.upgrade().is_some());
+            steps
+                .iter()
+                .filter_map(|(_, s)| s.upgrade())
+                .filter(|v| v.get_runnable())
+                .collect::<Vec<_>>()
+        };
+
+        let now = chrono::Utc::now();
+
+        let mut aborted = AHashSet::new();
+        let mut count = 0;
+
+        let max_unsupported_time = self.config.get_max_unsupported_time();
+        for step in &runnable {
+            let supported = self.machines.support_step(step);
+            if supported {
+                step.set_last_supported_now();
+                continue;
+            }
+
+            count += 1;
+            if (now - step.get_last_supported()) < max_unsupported_time {
+                continue;
+            }
+
+            let drv = step.get_drv_path();
+            let system = step.get_system();
+            log::error!("aborting unsupported build step '{drv}' (type '{system:?}')");
+
+            aborted.insert(step.clone());
+
+            let mut dependents = AHashSet::new();
+            let mut steps = AHashSet::new();
+            step.get_dependents(&mut dependents, &mut steps);
+            // Maybe the step got cancelled.
+            if dependents.is_empty() {
+                continue;
+            }
+
+            // Find the build that has this step as the top-level (if any).
+            let Some(build) = dependents
+                .iter()
+                .find(|b| &b.drv_path == drv)
+                .or(dependents.iter().next())
+            else {
+                // This should never happen: we checked is_empty above, and the
+                // fallback is just any build.
+                continue;
+            };
+
+            let mut job = machine::Job::new(build.id, drv.to_owned(), None);
+            job.result.start_time = Some(now);
+            job.result.stop_time = Some(now);
+            job.result.step_status = BuildStatus::Unsupported;
+            job.result.error_msg = Some(format!(
+                "unsupported system type '{}'",
+                system.unwrap_or(String::new())
+            ));
+            if let Err(e) = self.inner_fail_job(drv, None, job, step.clone()).await {
+                log::error!("Failed to fail step drv={drv} e={e}");
+            }
+        }
+
+        {
+            let mut queues = self.queues.write().await;
+            for step in &aborted {
+                queues.remove_job_by_path(step.get_drv_path());
+            }
+            queues.remove_all_weak_pointer();
+        }
+        self.metrics.nr_unsupported_steps.set(count);
+        self.metrics
+            .nr_unsupported_steps_aborted
+            .add(i64::try_from(aborted.len()).unwrap_or_default());
+    }
+}
diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/queue.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/queue.rs
new file mode 100644
index 000000000..91cc81628
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/src/state/queue.rs
@@ -0,0 +1,369 @@
+use std::sync::Weak;
+use std::sync::atomic::Ordering;
+use std::sync::{Arc, atomic::AtomicBool};
+
+use ahash::{AHashMap, AHashSet};
+
+use db::models::BuildID;
+use nix_utils::BaseStore as _;
+
+use super::System;
+use super::build::Step;
+
+type Counter = std::sync::atomic::AtomicU64;
+
+pub struct StepInfo {
+    pub step: Arc<Step>,
+    pub resolved_drv_path: Option<nix_utils::StorePath>,
+    already_scheduled: AtomicBool,
+    cancelled: AtomicBool,
+    pub runnable_since: chrono::DateTime<chrono::Utc>,
+
+    pub lowest_share_used: f64,
+    pub highest_global_priority: i32,
+    pub highest_local_priority: i32,
+    pub lowest_build_id: BuildID,
+}
+
+impl StepInfo {
+    pub fn new(store: &nix_utils::LocalStore, step: Arc<Step>) -> Self {
+        let (lowest_share_used, runnable_since) = {
+            let state = step.state.read();
+
+            let lowest_share_used = state
+                .jobsets
+                .iter()
+                .map(|v| v.share_used())
+                .min_by(f64::total_cmp)
+                .unwrap_or(1e9);
+            (lowest_share_used, step.get_runnable_since())
+        };
+
+        Self {
+            resolved_drv_path: store.try_resolve_drv(step.get_drv_path()),
+            already_scheduled: false.into(),
+            cancelled: false.into(),
+            runnable_since,
+            lowest_share_used,
+            highest_global_priority: step
+                .atomic_state
+                .highest_global_priority
+                .load(Ordering::Relaxed),
+            highest_local_priority: step
+                .atomic_state
+                .highest_local_priority
+                .load(Ordering::Relaxed),
+            lowest_build_id: step.atomic_state.lowest_build_id.load(Ordering::Relaxed),
+            step,
+        }
+    }
+
+    pub fn get_already_scheduled(&self) -> bool {
+        self.already_scheduled.load(Ordering::SeqCst)
+    }
+
+    pub fn set_already_scheduled(&self, v: bool) {
+        self.already_scheduled.store(v, Ordering::SeqCst);
+    }
+
+    pub fn set_cancelled(&self, v: bool) {
+        self.cancelled.store(v, Ordering::SeqCst);
+    }
+
+    pub fn get_cancelled(&self) -> bool {
+        self.cancelled.load(Ordering::SeqCst)
+    }
+}
+
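+/// A per-system queue of runnable steps.
+///
+/// Entries are weak pointers into `Queues::jobs`. `sort_jobs` orders them by
+/// highest global priority first, then lowest share used, then highest local
+/// priority, then lowest build id (see the comparator below).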
+pub struct BuildQueue {
+    // Note: ensure that this stays private
+    jobs: parking_lot::RwLock<Vec<Weak<StepInfo>>>,
+
+    active_runnable: Counter,
+    total_runnable: Counter,
+    nr_runnable_waiting: Counter,
+    nr_runnable_disabled: Counter,
+    avg_runnable_time: Counter,
+    wait_time_ms: Counter,
+}
+
+pub struct BuildQueueStats {
+    pub active_runnable: u64,
+    pub total_runnable: u64,
+    pub nr_runnable_waiting: u64,
+    pub nr_runnable_disabled: u64,
+    pub avg_runnable_time: u64,
+    pub wait_time: u64,
+}
+
+impl BuildQueue {
+    fn new() -> Self {
+        Self {
+            jobs: parking_lot::RwLock::new(Vec::new()),
+            active_runnable: 0.into(),
+            total_runnable: 0.into(),
+            nr_runnable_waiting: 0.into(),
+            nr_runnable_disabled: 0.into(),
+            avg_runnable_time: 0.into(),
+            wait_time_ms: 0.into(),
+        }
+    }
+
+    pub fn set_nr_runnable_waiting(&self, v: u64) {
+        self.nr_runnable_waiting.store(v, Ordering::Relaxed);
+    }
+
+    pub fn set_nr_runnable_disabled(&self, v: u64) {
+        self.nr_runnable_disabled.store(v, Ordering::Relaxed);
+    }
+
+    fn incr_active(&self) {
+        self.active_runnable.fetch_add(1, Ordering::Relaxed);
+    }
+
+    fn decr_active(&self) {
+        self.active_runnable.fetch_sub(1, Ordering::Relaxed);
+    }
+
+    #[tracing::instrument(skip(self, jobs))]
+    fn insert_new_jobs(&self, jobs: Vec<Weak<StepInfo>>, now: &chrono::DateTime<chrono::Utc>) {
+        let mut current_jobs = self.jobs.write();
+        let mut wait_time_ms = 0u64;
+
+        for j in jobs {
+            if let Some(owned) = j.upgrade() {
+                // This ensures we only ever have each step once, so
+                // `current_jobs` must never be written anywhere else. The
+                // `continue` should never trigger, as `jobs` should already
+                // exclude duplicates.
+                if current_jobs
+                    .iter()
+                    .filter_map(std::sync::Weak::upgrade)
+                    .any(|v| v.step.get_drv_path() == owned.step.get_drv_path())
+                {
+                    continue;
+                }
+
+                // `now` is always at or after `runnable_since`.
+                wait_time_ms += (*now - owned.runnable_since)
+                    .num_milliseconds()
+                    .unsigned_abs();
+                current_jobs.push(j);
+            }
+        }
+        self.wait_time_ms.fetch_add(wait_time_ms, Ordering::Relaxed);
+
+        // only keep valid pointers
+        drop(current_jobs);
+        self.scrube_jobs();
+        self.sort_jobs();
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn sort_jobs(&self) {
+        let mut current_jobs = self.jobs.write();
+        let delta = 0.00001;
+        current_jobs.sort_by(|a, b| {
+            let a = a.upgrade();
+            let b = b.upgrade();
+            match (a, b) {
+                (Some(a), Some(b)) => (if a.highest_global_priority != b.highest_global_priority {
+                    a.highest_global_priority.cmp(&b.highest_global_priority)
+                } else if (a.lowest_share_used - b.lowest_share_used).abs() > delta {
+                    b.lowest_share_used.total_cmp(&a.lowest_share_used)
+                } else if a.highest_local_priority != b.highest_local_priority {
+                    a.highest_local_priority.cmp(&b.highest_local_priority)
+                } else {
+                    b.lowest_build_id.cmp(&a.lowest_build_id)
+                })
+                .reverse(),
+                (Some(_), None) => std::cmp::Ordering::Greater,
+                (None, Some(_)) => std::cmp::Ordering::Less,
+                (None, None) => std::cmp::Ordering::Equal,
+            }
+        });
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn scrube_jobs(&self) {
+        let mut current_jobs = self.jobs.write();
+        current_jobs.retain(|v| v.upgrade().is_some());
+        self.total_runnable
+            .store(current_jobs.len() as u64, Ordering::Relaxed);
+    }
+
+    pub fn clone_inner(&self) -> Vec<Weak<StepInfo>> {
+        (*self.jobs.read()).clone()
+    }
+
+    pub fn get_stats(&self) -> BuildQueueStats {
+        BuildQueueStats {
+            active_runnable: self.active_runnable.load(Ordering::Relaxed),
+            total_runnable: self.total_runnable.load(Ordering::Relaxed),
+            nr_runnable_waiting: self.nr_runnable_waiting.load(Ordering::Relaxed),
+            nr_runnable_disabled: self.nr_runnable_disabled.load(Ordering::Relaxed),
+            avg_runnable_time: self.avg_runnable_time.load(Ordering::Relaxed),
+            wait_time: self.wait_time_ms.load(Ordering::Relaxed),
+        }
+    }
+}
+
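+/// All per-system `BuildQueue`s, the owning map of `StepInfo`s, and the set of
+/// steps currently scheduled on a machine.
+///
+/// Lifecycle sketch: `insert_new_jobs` registers a step and enqueues a weak
+/// pointer into the queue for its system; `add_job_to_scheduled` /
+/// `remove_job_from_scheduled` track dispatch to a machine; `remove_job` drops
+/// ownership, after which the queues prune their dead weak pointers.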
+pub struct Queues {
+    // Flat list of all step infos across the queues; this map owns the
+    // `StepInfo`s, the inner per-system queues don't own them.
+    jobs: AHashMap<nix_utils::StorePath, Arc<StepInfo>>,
+    inner: AHashMap<System, Arc<BuildQueue>>,
+    #[allow(clippy::type_complexity)]
+    scheduled: parking_lot::RwLock<
+        AHashMap<nix_utils::StorePath, (Arc<StepInfo>, Arc<BuildQueue>, Arc<super::machine::Machine>)>,
+    >,
+}
+
+impl Queues {
+    pub fn new() -> Self {
+        Self {
+            jobs: AHashMap::new(),
+            inner: AHashMap::new(),
+            scheduled: parking_lot::RwLock::new(AHashMap::new()),
+        }
+    }
+
+    #[tracing::instrument(skip(self, jobs))]
+    pub fn insert_new_jobs<S: Into<System> + std::fmt::Debug>(
+        &mut self,
+        system: S,
+        jobs: Vec<StepInfo>,
+        now: &chrono::DateTime<chrono::Utc>,
+    ) {
+        let mut submit_jobs: Vec<Weak<StepInfo>> = Vec::new();
+        for j in jobs {
+            let j = Arc::new(j);
+            // We need to check that get_finished is not true! While a job is
+            // being processed for completion it can be resubmitted into the
+            // queues; to keep that from blocking everything, such a job must
+            // not land here.
+            if !self.jobs.contains_key(j.step.get_drv_path()) && !j.step.get_finished() {
+                self.jobs
+                    .insert(j.step.get_drv_path().to_owned(), j.clone());
+                submit_jobs.push(Arc::downgrade(&j));
+            }
+        }
+
+        let queue = self
+            .inner
+            .entry(system.into())
+            .or_insert_with(|| Arc::new(BuildQueue::new()));
+        // queues are sorted afterwards
+        queue.insert_new_jobs(submit_jobs, now);
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn remove_all_weak_pointer(&mut self) {
+        for queue in self.inner.values() {
+            queue.scrube_jobs();
+        }
+    }
+
+    pub fn clone_inner(&self) -> AHashMap<System, Arc<BuildQueue>> {
+        self.inner.clone()
+    }
+
+    pub fn iter(&self) -> std::collections::hash_map::Iter<'_, System, Arc<BuildQueue>> {
+        self.inner.iter()
+    }
+
+    #[tracing::instrument(skip(self, step, queue))]
+    pub fn add_job_to_scheduled(
+        &self,
+        step: &Arc<StepInfo>,
+        queue: &Arc<BuildQueue>,
+        machine: Arc<super::machine::Machine>,
+    ) {
+        let mut scheduled = self.scheduled.write();
+
+        let drv = step.step.get_drv_path();
+        scheduled.insert(drv.to_owned(), (step.clone(), queue.clone(), machine));
+        step.set_already_scheduled(true);
+        queue.incr_active();
+    }
+
+    #[tracing::instrument(skip(self), fields(%drv))]
+    pub fn remove_job_from_scheduled(
+        &self,
+        drv: &nix_utils::StorePath,
+    ) -> Option<(Arc<StepInfo>, Arc<BuildQueue>, Arc<super::machine::Machine>)> {
+        let mut scheduled = self.scheduled.write();
+
+        let (step_info, queue, machine) = scheduled.remove(drv)?;
+        step_info.set_already_scheduled(false);
+        queue.decr_active();
+        Some((step_info, queue, machine))
+    }
+
+    pub fn remove_job_by_path(&mut self, drv: &nix_utils::StorePath) {
+        if self.jobs.remove(drv).is_none() {
+            log::error!("Failed to remove stepinfo drv={drv} from jobs!");
+        }
+    }
+
+    #[tracing::instrument(skip(self, stepinfo, queue))]
+    pub fn remove_job(&mut self, stepinfo: &Arc<StepInfo>, queue: &Arc<BuildQueue>) {
+        if self.jobs.remove(stepinfo.step.get_drv_path()).is_none() {
+            log::error!(
+                "Failed to remove stepinfo drv={} from jobs!",
+                stepinfo.step.get_drv_path(),
+            );
+        }
+        // Dropping the owning Arc invalidates the weak pointer in the queue;
+        // prune it so the queue's counters stay accurate.
+        queue.scrube_jobs();
+    }
+
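+    /// Aborts every scheduled step that no build depends on any more (e.g.
+    /// because all referring builds were cancelled). Returns the drv paths
+    /// that were aborted together with the id of the machine they ran on.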
+    #[tracing::instrument(skip(self))]
+    pub async fn kill_active_steps(&self) -> Vec<(nix_utils::StorePath, uuid::Uuid)> {
+        let active = {
+            let scheduled = self.scheduled.read();
+            scheduled.clone()
+        };
+
+        let mut cancelled_steps = vec![];
+        for (drv_path, (step_info, _, machine)) in &active {
+            if step_info.get_cancelled() {
+                continue;
+            }
+
+            let mut dependents = AHashSet::new();
+            let mut steps = AHashSet::new();
+            step_info.step.get_dependents(&mut dependents, &mut steps);
+            if !dependents.is_empty() {
+                continue;
+            }
+
+            {
+                step_info.set_cancelled(true);
+                if let Err(e) = machine.abort_build(drv_path).await {
+                    log::error!("Failed to abort build drv_path={drv_path} e={e}");
+                    continue;
+                }
+
+                cancelled_steps.push((drv_path.to_owned(), machine.id));
+            }
+        }
+        cancelled_steps
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn get_stats_per_queue(&self) -> AHashMap<System, BuildQueueStats> {
+        self.inner
+            .iter()
+            .map(|(k, v)| (k.clone(), v.get_stats()))
+            .collect()
+    }
+
+    pub fn get_jobs(&self) -> Vec<Arc<StepInfo>> {
+        self.jobs.values().map(Clone::clone).collect()
+    }
+
+    pub fn get_scheduled(&self) -> Vec<Arc<StepInfo>> {
+        let s = self.scheduled.read();
+        s.iter().map(|(_, (s, _, _))| s.clone()).collect()
+    }
+}
diff --git a/src/hydra-queue-runner-v2/queue-runner/src/state/uploader.rs b/src/hydra-queue-runner-v2/queue-runner/src/state/uploader.rs
new file mode 100644
index 000000000..76148ff66
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/src/state/uploader.rs
@@ -0,0 +1,119 @@
+use nix_utils::BaseStore as _;
+
+// TODO: this scheduling is not crash-safe: if we crash or restart, queued
+// uploads are lost and have to be redone, even though the builds are already
+// marked as done in the db. So we need to make this queue persistent!
+
+struct Message {
+    store_paths: Vec<nix_utils::StorePath>,
+    log_remote_path: String,
+    log_local_path: String,
+}
+
+pub struct Uploader {
+    upload_queue_sender: tokio::sync::mpsc::UnboundedSender<Message>,
+    upload_queue_receiver: tokio::sync::Mutex<tokio::sync::mpsc::UnboundedReceiver<Message>>,
+}
+
+impl Uploader {
+    pub fn new() -> Self {
+        let (upload_queue_tx, upload_queue_rx) = tokio::sync::mpsc::unbounded_channel::<Message>();
+        Self {
+            upload_queue_sender: upload_queue_tx,
+            upload_queue_receiver: tokio::sync::Mutex::new(upload_queue_rx),
+        }
+    }
+
+    #[tracing::instrument(skip(self), err)]
+    pub fn schedule_upload(
+        &self,
+        store_paths: Vec<nix_utils::StorePath>,
+        log_remote_path: String,
+        log_local_path: String,
+    ) -> anyhow::Result<()> {
+        log::info!("Scheduling new path upload: {:?}", store_paths);
+        self.upload_queue_sender.send(Message {
+            store_paths,
+            log_remote_path,
+            log_local_path,
+        })?;
+        Ok(())
+    }
+
+    async fn upload_msg(
+        &self,
+        local_store: nix_utils::LocalStore,
+        remote_stores: Vec<nix_utils::RemoteStore>,
+        msg: Message,
+    ) {
+        // TODO: we need retries for this! We cannot afford to lose a cache push.
+        log::info!("Uploading paths: {:?}", msg.store_paths);
+
+        for remote_store in remote_stores {
+            if let Err(e) = remote_store
+                .upsert_file(
+                    msg.log_remote_path.clone(),
+                    std::path::PathBuf::from(msg.log_local_path.clone()),
+                    "text/plain; charset=utf-8",
+                )
+                .await
+            {
+                log::error!("Failed to copy path to remote store: {e}");
+            }
+
+            let paths_to_copy = local_store
+                .query_requisites(msg.store_paths.clone(), false)
+                .await
+                .unwrap_or_default();
+            let paths_to_copy = remote_store.query_missing_paths(paths_to_copy).await;
+            if let Err(e) = nix_utils::copy_paths(
+                local_store.as_base_store(),
+                remote_store.as_base_store(),
+                &paths_to_copy,
+                false,
+                false,
+                false,
+            )
+            .await
+            {
+                log::error!("Failed to copy path to remote store: {e}");
+            }
+        }
+
+        log::info!("Finished uploading paths: {:?}", msg.store_paths);
+    }
+
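+    /// Receives a single queued message and uploads it. Intended to be driven
+    /// by a long-running worker task, along the lines of the following sketch
+    /// (how `local_store` and `remote_stores` are obtained is up to the
+    /// caller):
+    ///
+    /// ```ignore
+    /// loop {
+    ///     uploader.upload_once(local_store.clone(), remote_stores.clone()).await;
+    /// }
+    /// ```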
+    pub async fn upload_once(
+        &self,
+        local_store: nix_utils::LocalStore,
+        remote_stores: Vec<nix_utils::RemoteStore>,
+    ) {
+        let Some(msg) = ({
+            let mut rx = self.upload_queue_receiver.lock().await;
+            rx.recv().await
+        }) else {
+            return;
+        };
+
+        self.upload_msg(local_store, remote_stores, msg).await;
+    }
+
+    pub async fn upload_many(
+        &self,
+        local_store: nix_utils::LocalStore,
+        remote_stores: Vec<nix_utils::RemoteStore>,
+        limit: usize,
+    ) {
+        let mut messages: Vec<Message> = Vec::with_capacity(limit);
+        {
+            let mut rx = self.upload_queue_receiver.lock().await;
+            rx.recv_many(&mut messages, limit).await;
+        }
+
+        let mut jobs = vec![];
+        for msg in messages {
+            jobs.push(self.upload_msg(local_store.clone(), remote_stores.clone(), msg));
+        }
+        futures::future::join_all(jobs).await;
+    }
+}
diff --git a/src/hydra-queue-runner-v2/queue-runner/src/utils.rs b/src/hydra-queue-runner-v2/queue-runner/src/utils.rs
new file mode 100644
index 000000000..314fef59e
--- /dev/null
+++ b/src/hydra-queue-runner-v2/queue-runner/src/utils.rs
@@ -0,0 +1,119 @@
+use db::Transaction;
+use db::models::BuildID;
+use nix_utils::BaseStore as _;
+
+use crate::state::RemoteBuild;
+
+#[tracing::instrument(skip(tx, res), err)]
+pub async fn finish_build_step(
+    tx: &mut Transaction<'_>,
+    build_id: BuildID,
+    step_nr: i32,
+    res: &RemoteBuild,
+    machine: Option<String>,
+) -> anyhow::Result<()> {
+    debug_assert!(res.start_time.is_some());
+    debug_assert!(res.stop_time.is_some());
+    tx.update_build_step_in_finish(db::models::UpdateBuildStepInFinish {
+        build_id,
+        step_nr,
+        status: res.step_status,
+        error_msg: res.error_msg.as_deref(),
+        start_time: i32::try_from(res.start_time.map(|s| s.timestamp()).unwrap_or_default())?,
+        stop_time: i32::try_from(res.stop_time.map(|s| s.timestamp()).unwrap_or_default())?,
+        machine: machine.as_deref(),
+        overhead: if res.overhead != 0 {
+            Some(res.overhead)
+        } else {
+            None
+        },
+        times_built: if res.times_build > 0 {
+            Some(res.times_build)
+        } else {
+            None
+        },
+        is_non_deterministic: if res.times_build > 0 {
+            Some(res.is_non_deterministic)
+        } else {
+            None
+        },
+    })
+    .await?;
+    debug_assert!(!res.log_file.is_empty());
+    debug_assert!(!res.log_file.contains('\t'));
+
+    tx.notify_step_finished(build_id, step_nr, &res.log_file)
+        .await?;
+
+    if res.step_status == db::models::BuildStatus::Success {
+        // Update the corresponding `BuildStepOutputs` row to add the output path.
+        let drv_path = tx.get_drv_path_from_build_step(build_id, step_nr).await?;
+        if let Some(drv_path) = drv_path {
+            // If we've finished building, all the paths should be known.
+            if let Some(drv) = nix_utils::query_drv(&nix_utils::StorePath::new(&drv_path)).await? {
+                for o in drv.outputs {
+                    if let Some(path) = o.path {
+                        tx.update_build_step_output(
+                            build_id,
+                            step_nr,
+                            &o.name,
+                            &path.get_full_path(),
+                        )
+                        .await?;
+                    }
+                }
+            }
+        }
+    }
+    Ok(())
+}
+
+#[tracing::instrument(skip(db, store, o, build_opts, remote_store), fields(%drv_path), err(level=tracing::Level::WARN))]
+pub async fn substitute_output(
+    db: db::Database,
+    store: nix_utils::LocalStore,
+    o: nix_utils::DerivationOutput,
+    build_id: BuildID,
+    drv_path: &nix_utils::StorePath,
+    build_opts: &nix_utils::BuildOptions,
+    remote_store: Option<&nix_utils::RemoteStore>,
+) -> anyhow::Result<()> {
+    let Some(path) = &o.path else {
+        return Ok(());
+    };
+
+    let starttime = i32::try_from(chrono::Utc::now().timestamp())?; // TODO
+    let (mut child, _) = nix_utils::realise_drv(path, build_opts, false).await?;
+    nix_utils::validate_statuscode(child.wait().await?)?;
+    if let Some(remote_store) = remote_store {
+        let paths_to_copy = store
+            .query_requisites(vec![path.to_owned()], false)
+            .await
+            .unwrap_or_default();
+        let paths_to_copy = remote_store.query_missing_paths(paths_to_copy).await;
+        nix_utils::copy_paths(
+            store.as_base_store(),
+            remote_store.as_base_store(),
+            &paths_to_copy,
+            false,
+            true,
+            false,
+        )
+        .await?;
+    }
+    let stoptime = i32::try_from(chrono::Utc::now().timestamp())?; // TODO
+
+    let mut db = db.get().await?;
+    let mut tx = db.begin_transaction().await?;
+    tx.create_substitution_step(
+        starttime,
+        stoptime,
+        build_id,
+        &drv_path.get_full_path(),
+        (o.name, o.path.map(|p| p.get_full_path())),
+    )
+    .await?;
+    tx.commit().await?;
+
+    Ok(())
+}