From 08665e027c17b89642b48a39894d0e0d40f55cdc Mon Sep 17 00:00:00 2001
From: geofmureithi
Date: Tue, 16 Dec 2025 18:55:56 +0300
Subject: [PATCH 01/12] bump: introducing rc.1

---
 .clippy.toml | 2 +-
 .github/workflows/cd.yaml | 15 -
 .github/workflows/ci.yaml | 2 +-
 .gitignore | 1 -
 Cargo.lock | 3451 +++++++++++++++++
 Cargo.toml | 4 +-
 apalis-core/Cargo.toml | 10 +-
 apalis-core/README.md | 4 +-
 apalis-core/src/backend/codec.rs | 52 -
 apalis-core/src/backend/config.rs | 7 -
 apalis-core/src/backend/custom.rs | 47 +-
 apalis-core/src/backend/impls/dequeue.rs | 296 ++
 apalis-core/src/backend/impls/memory.rs | 28 +-
 apalis-core/src/backend/impls/mod.rs | 4 +-
 apalis-core/src/backend/mod.rs | 38 +-
 apalis-core/src/backend/pipe.rs | 10 +-
 apalis-core/src/backend/poll_strategy/mod.rs | 63 +-
 apalis-core/src/lib.rs | 5 +-
 apalis-core/src/monitor/mod.rs | 31 +-
 apalis-core/src/task/extensions.rs | 2 +-
 apalis-core/src/task/metadata.rs | 11 -
 apalis-core/src/task/status.rs | 9 +-
 apalis-core/src/task/task_id.rs | 2 +-
 apalis-core/src/task_fn/guide.rs | 4 +-
 apalis-core/src/worker/context.rs | 2 +-
 .../src/worker/ext/circuit_breaker/service.rs | 21 +-
 apalis-core/src/worker/mod.rs | 9 +-
 apalis-sql/Cargo.toml | 6 +-
 apalis-workflow/Cargo.toml | 10 +-
 apalis-workflow/README.md | 4 +-
 apalis-workflow/src/dag/mod.rs | 3 +-
 apalis-workflow/src/id_generator.rs | 27 +
 apalis-workflow/src/lib.rs | 10 +-
 apalis/Cargo.toml | 7 +-
 apalis/README.md | 18 +-
 apalis/src/lib.rs | 3 +
 deny.toml | 235 ++
 examples/basics/Cargo.toml | 2 +-
 examples/basics/src/main.rs | 59 +-
 examples/fn-args/src/main.rs | 2 +-
 examples/graceful-shutdown/Cargo.toml | 2 +-
 examples/graceful-shutdown/src/main.rs | 27 +-
 examples/prometheus/Cargo.toml | 3 +-
 examples/prometheus/src/main.rs | 2 +-
 examples/workflow/Cargo.toml | 3 +-
 examples/workflow/src/main.rs | 2 +-
 supply-chain/config.toml | 10 +-
 utils/apalis-codec/Cargo.toml | 24 +
 utils/apalis-codec/README.md | 28 +
 utils/apalis-codec/src/bincode.rs | 54 +
 utils/apalis-codec/src/json.rs | 90 +
 utils/apalis-codec/src/lib.rs | 15 +
 utils/apalis-codec/src/msgpack.rs | 56 +
 utils/apalis-file-storage/Cargo.toml | 42 +
 utils/apalis-file-storage/README.md | 0
 .../apalis-file-storage/src}/backend.rs | 43 +-
 .../apalis-file-storage/src/lib.rs | 24 +-
 .../apalis-file-storage/src}/meta.rs | 17 +-
 .../apalis-file-storage/src}/shared.rs | 51 +-
 .../apalis-file-storage/src}/sink.rs | 26 +-
 .../apalis-file-storage/src}/util.rs | 31 +-
 61 files changed, 4619 insertions(+), 447 deletions(-)
 create mode 100644 Cargo.lock
 delete mode 100644 apalis-core/src/backend/config.rs
 create mode 100644 apalis-core/src/backend/impls/dequeue.rs
 create mode 100644 deny.toml
 create mode 100644 utils/apalis-codec/Cargo.toml
 create mode 100644 utils/apalis-codec/README.md
 create mode 100644 utils/apalis-codec/src/bincode.rs
 create mode 100644 utils/apalis-codec/src/json.rs
 create mode 100644 utils/apalis-codec/src/lib.rs
 create mode 100644 utils/apalis-codec/src/msgpack.rs
 create mode 100644 utils/apalis-file-storage/Cargo.toml
 create mode 100644 utils/apalis-file-storage/README.md
 rename {apalis-core/src/backend/impls/json => utils/apalis-file-storage/src}/backend.rs (76%)
 rename apalis-core/src/backend/impls/json/mod.rs => utils/apalis-file-storage/src/lib.rs (96%)
 rename {apalis-core/src/backend/impls/json => utils/apalis-file-storage/src}/meta.rs (55%)
 rename {apalis-core/src/backend/impls/json => utils/apalis-file-storage/src}/shared.rs (87%)
 rename {apalis-core/src/backend/impls/json => utils/apalis-file-storage/src}/sink.rs (76%)
 rename {apalis-core/src/backend/impls/json => utils/apalis-file-storage/src}/util.rs (88%)

diff --git a/.clippy.toml b/.clippy.toml
index 3fbd6b78..af9e8f29 100644
--- a/.clippy.toml
+++ b/.clippy.toml
@@ -1,4 +1,4 @@
-msrv = "1.60.0"
+msrv = "1.85.0" # Minimum supported Rust version because of `2024` edition features
 allow-mixed-uninlined-format-args = false
 disallowed-types = [
     { path = "tower::util::BoxCloneService", reason = "Use tower::util::BoxCloneSyncService instead" },
diff --git a/.github/workflows/cd.yaml b/.github/workflows/cd.yaml
index 28221eb1..f4b3043e 100644
--- a/.github/workflows/cd.yaml
+++ b/.github/workflows/cd.yaml
@@ -37,21 +37,6 @@ jobs:
           echo VERSION=${VERSION} >> $GITHUB_ENV
           echo PUBLISH_OPTS=${PUBLISH_OPTS} >> $GITHUB_ENV
           echo VERSION_NUMBER=${VERSION_NUMBER} >> $GITHUB_ENV
-      - name: check version integrity
-        run: |
-          ERROR=''
-          echo VERSION: ${VERSION}, VERSION_NUMBER: ${VERSION_NUMBER}
-          for dir in "." packages/apalis-{core,sql}; do
-            PACKAGE=$(cargo get package.name --entry $dir)
-            ACTUAL=$(cargo get package.version --entry $dir)
-            if [[ $VERSION != $ACTUAL ]]; then
-              echo ${PACKAGE}: expected version ${VERSION} but found ${ACTUAL}
-              ERROR=1
-            fi
-          done
-          if [[ $ERROR ]]; then
-            exit 1
-          fi
       - name: publish apalis-core
         uses: actions-rs/cargo@v1
         with:
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 43daa98a..ea2b9f96 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -45,7 +45,7 @@ jobs:
       - uses: actions-rs/cargo@v1
         with:
           command: test
-          # args: --all
+          args: --all
 
 
   fmt:
diff --git a/.gitignore b/.gitignore
index 299ce7a6..340e62e7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,6 +1,5 @@
 /target
 **/*.rs.bk
-Cargo.lock
 examples/**/*.env
 examples/sqlite/data.*
 .DS_Store
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 00000000..8377ebb3
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,3451 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "apalis"
+version = "1.0.0-rc.1"
+dependencies = [
+ "apalis-core",
+ "apalis-file-storage",
+ "apalis-workflow",
+ "document-features",
+ "futures-util",
+ "metrics",
+ "metrics-exporter-prometheus",
+ "pin-project",
+ "sentry-core 0.45.0",
+ "serde",
+ "thiserror 2.0.17",
+ "tokio",
+ "tower 0.5.2",
+ "tracing",
+ "ulid",
+ "uuid",
+]
+
+[[package]]
+name = "apalis-codec"
+version = "0.1.0-rc.1"
+dependencies = [
+ "apalis-core",
+ "bincode",
+ "rmp-serde",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+]
+
+[[package]]
+name = "apalis-core"
+version = "1.0.0-rc.1"
+dependencies = [
+ "document-features",
+ "futures-channel",
+ "futures-core",
+ "futures-sink",
+ "futures-timer",
+ "futures-util",
+ "pin-project",
+ "serde",
+ "thiserror 2.0.17",
+ "tokio",
+ "tower 0.5.2",
+ "tower-layer",
+ "tower-service",
+ "tracing",
+]
+
+[[package]]
+name = "apalis-file-storage"
+version = "0.1.0-rc.1"
+dependencies = [
+ "apalis-codec",
+ "apalis-core",
+ "apalis-workflow",
+ "futures-channel",
+ "futures-core",
+ "futures-sink",
+ "futures-util",
+ "serde",
+ "serde_json",
+ "tokio",
+]
+
+[[package]]
+name = "apalis-sql"
+version = "1.0.0-rc.1"
+dependencies = [
+ "apalis-core",
+ "chrono",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+]
+
+[[package]]
+name = "apalis-workflow"
+version = "0.1.0-rc.1"
+dependencies = [
+ "apalis-core",
+ "apalis-file-storage",
+ "futures",
+ "petgraph",
+ "rand 0.9.2",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tower 0.5.2",
+ "tracing",
+ "ulid",
+ "uuid",
+]
+
[remaining auto-generated `[[package]]` entries for third-party dependencies and example crates omitted; this copy of the patch is truncated mid-entry at "socket2" 0.5.10]
"49f4e86402d5c50239dc7d8fd3f6d5e048221d5fcb4e026d8d50ab57fe4644cb" +dependencies = [ + "sentry-backtrace", + "sentry-core 0.37.0", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sentry-types" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3f117b8755dbede8260952de2aeb029e20f432e72634e8969af34324591631" +dependencies = [ + "debugid", + "hex", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror 1.0.69", + "time", + "url", + "uuid", +] + +[[package]] +name = "sentry-types" +version = "0.45.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c19d1d1967b55659c358886d0f1aa3076488d445f84c7d727d384c675adaec1" +dependencies = [ + "debugid", + "hex", + "rand 0.9.2", + "serde", + "serde_json", + "thiserror 2.0.17", + "time", + "url", + "uuid", +] + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_json" +version = "1.0.145" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +dependencies = [ + "itoa", + "memchr", + "ryu", + "serde", + "serde_core", +] + +[[package]] +name = "serde_urlencoded" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" +dependencies = [ + "form_urlencoded", + "itoa", + "ryu", + "serde", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "signal-hook-registry" +version = "1.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" +dependencies = [ + "libc", +] + +[[package]] +name = "sketches-ddsketch" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1e9a774a6c28142ac54bb25d25562e6bcf957493a184f15ad4eebccb23e410a" + +[[package]] +name = "slab" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "socket2" +version = "0.5.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "socket2" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881" +dependencies = [ + "libc", + "windows-sys 0.60.2", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + +[[package]] +name = "syn" +version = "2.0.111" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tempfile" +version = "3.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" +dependencies = [ + "fastrand", + "getrandom 0.3.4", + "once_cell", + "rustix", + "windows-sys 0.61.2", +] + +[[package]] +name = "termcolor" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8" +dependencies = [ + "thiserror-impl 2.0.17", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "time" +version = "0.3.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" + +[[package]] +name = "time-macros" +version = "0.2.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" +dependencies = [ + "num-conv", + "time-core", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" +dependencies = [ + "bytes", + "libc", + "mio", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2 0.6.1", + "tokio-macros", + "windows-sys 0.61.2", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-native-tls" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" +dependencies = [ + "native-tls", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.26.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" +dependencies = [ + "rustls", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" +dependencies = [ + "futures-core", + "futures-util", + "hdrhistogram", + "indexmap", + "pin-project-lite", + "slab", + "sync_wrapper 1.0.2", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" +dependencies = [ + "bitflags 1.3.2", + 
"bytes", + "futures-core", + "futures-util", + "http 0.2.12", + "http-body 0.4.6", + "http-range-header", + "pin-project-lite", + "tower 0.4.13", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags 2.10.0", + "bytes", + "futures-util", + "http 1.4.0", + "http-body 1.0.1", + "iri-string", + "pin-project-lite", + "tower 0.5.2", + "tower-layer", + "tower-service", +] + +[[package]] +name = "tower-layer" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + +[[package]] +name = "tower-service" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + +[[package]] +name = "tracing" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +dependencies = [ + "log", + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-example" +version = "0.1.0" +dependencies = [ + "anyhow", + "apalis", + "chrono", + "email-service", + "env_logger", + "futures", + "serde", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex-automata", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", + "tracing-serde", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "ulid" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe" +dependencies = [ + "rand 0.9.2", + "uuid", + "web-time", +] + +[[package]] +name = "uname" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b72f89f0ca32e4db1c04e2a72f5345d59796d4866a1ee0609084569f73683dc8" +dependencies = [ + "libc", +] + +[[package]] +name = "unicode-ident" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5" + +[[package]] +name = "unmonitored-worker" +version = "0.1.0" +dependencies = [ + "apalis", + "futures", + "serde", + "tokio", + "tower 0.4.13", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "untrusted" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + +[[package]] +name = "unty" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae" + +[[package]] +name = "ureq" +version = "2.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "02d1a66277ed75f640d608235660df48c8e3c19f3b4edb6a263315626cc3c01d" +dependencies = [ + "base64", + "log", + "native-tls", + "once_cell", + "url", +] + +[[package]] +name = "url" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "uuid" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" +dependencies = [ + "getrandom 0.3.4", + "js-sys", + "serde_core", + "wasm-bindgen", +] + +[[package]] +name = "valuable" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba73ea9cf16a25df0c8caa16c51acb937d5712a8429db78a3ee29d5dcacd3a65" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "virtue" +version = "0.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "051eb1abcf10076295e815102942cc58f9d5e3b4560e46e53c21e8ff6f3af7b1" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" +dependencies = [ + "cfg-if", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "web-sys" +version = "0.3.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys 0.61.2", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", +] + +[[package]] +name = "windows-targets" +version = "0.53.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" +dependencies = [ + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_i686_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" + +[[package]] +name = "wit-bindgen" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" + +[[package]] +name = "workflow" +version = "0.1.0" +dependencies = [ + "apalis", + "apalis-core", + "apalis-file-storage", + "apalis-workflow", + "futures", + "serde", + "serde_json", + "tokio", + "tower 0.5.2", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + 
"syn", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "zeroize" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/Cargo.toml b/Cargo.toml index c2266195..b3b9536d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,11 +1,11 @@ [workspace] -members = ["apalis", "apalis-*", "examples/*"] +members = ["apalis", "apalis-*", "examples/*", "utils/*"] # Only check / build main crates by default (check all with `--workspace`) default-members = ["apalis", "apalis-*"] resolver = "2" [workspace.package] -rust-version = "1.75" +rust-version = "1.85" edition = "2024" repository = "https://github.com/apalis-dev/apalis" diff --git a/apalis-core/Cargo.toml b/apalis-core/Cargo.toml index 81d6d7f2..e1aab8c6 100644 --- a/apalis-core/Cargo.toml +++ b/apalis-core/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "apalis-core" -version = "1.0.0-beta.2" +version = "1.0.0-rc.1" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true license = "MIT" description = "Core for apalis: simple, extensible multithreaded background processing for Rust" categories = ["concurrency"] -readme = "../README.md" +readme = "README.md" publish = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -33,9 +33,6 @@ pin-project = "1" thiserror = "2.0.0" futures-timer = { version = "3.0.3", optional = true } tracing = { version = "0.1.41", default-features = false, optional = true } -# Needed for the codec -serde_json = { version = "1", optional = true } - [dependencies.document-features] version = "0.2" @@ -49,8 +46,6 @@ docsrs = ["dep:document-features"] sleep = 
["dep:futures-timer"] ## Enable serde support serde = ["dep:serde"] -## Enable serde_json support -json = ["serde", "dep:serde_json"] ## Enable tracing support tracing = ["dep:tracing"] ## Enable test utilities @@ -71,7 +66,6 @@ tokio = { version = "1.37.0", features = [ "signal", ] } tower = { version = "0.5", features = ["full"] } -apalis-workflow = { path = "../apalis-workflow", version = "0.1.0-alpha.6" } # For json backend tests [lints] workspace = true diff --git a/apalis-core/README.md b/apalis-core/README.md index 80a4d559..4ea5df41 100644 --- a/apalis-core/README.md +++ b/apalis-core/README.md @@ -46,11 +46,11 @@ let task: Task = TaskBuilder::new("my-task".to_string()) Specific documentation for tasks can be found in the [`task`] and [`task::builder`] modules. ##### Relevant Guides: -- [**Defining Task arguments**](https://docs.rs/apalis-core/1.0.0-beta.2/apalis_core/task_fn/guide/index.html) - Creating effective task arguments that are scalable and type-safe +- [**Defining Task arguments**](https://docs.rs/apalis-core/1.0.0-rc.1/apalis_core/task_fn/guide/index.html) - Creating effective task arguments that are scalable and type-safe ### Backends -The [`Backend`](https://docs.rs/apalis-core/1.0.0-beta.2/apalis_core/backend/trait.Backend.html) trait serves as the core abstraction for all task sources. +The [`Backend`](https://docs.rs/apalis-core/1.0.0-rc.1/apalis_core/backend/trait.Backend.html) trait serves as the core abstraction for all task sources. It defines task polling mechanisms, streaming interfaces, and middleware integration points.
diff --git a/apalis-core/src/backend/codec.rs b/apalis-core/src/backend/codec.rs index 8ce33bb6..4d0e88a5 100644 --- a/apalis-core/src/backend/codec.rs +++ b/apalis-core/src/backend/codec.rs @@ -111,55 +111,3 @@ where self.inner.poll_compact(worker) } } - -/// Encoding for tasks using json -#[cfg(feature = "json")] -pub mod json { - use std::marker::PhantomData; - - use serde::{Serialize, de::DeserializeOwned}; - use serde_json::Value; - - use super::Codec; - - /// Json encoding and decoding - #[derive(Debug, Clone, Default)] - pub struct JsonCodec { - _o: PhantomData, - } - - impl Codec for JsonCodec> { - type Compact = Vec; - type Error = serde_json::Error; - fn encode(input: &T) -> Result, Self::Error> { - serde_json::to_vec(input) - } - - fn decode(compact: &Vec) -> Result { - serde_json::from_slice(compact) - } - } - - impl Codec for JsonCodec { - type Compact = String; - type Error = serde_json::Error; - fn encode(input: &T) -> Result { - serde_json::to_string(input) - } - fn decode(compact: &String) -> Result { - serde_json::from_str(compact) - } - } - - impl Codec for JsonCodec { - type Compact = Value; - type Error = serde_json::Error; - fn encode(input: &T) -> Result { - serde_json::to_value(input) - } - - fn decode(compact: &Value) -> Result { - T::deserialize(compact) - } - } -} diff --git a/apalis-core/src/backend/config.rs b/apalis-core/src/backend/config.rs deleted file mode 100644 index 202d16d9..00000000 --- a/apalis-core/src/backend/config.rs +++ /dev/null @@ -1,7 +0,0 @@ -use crate::backend::{Backend, queue::Queue}; - -/// Extension trait for accessing queue configuration -pub trait ConfigExt: Backend { - /// Get the queue configuration - fn get_queue(&self) -> Queue; -} diff --git a/apalis-core/src/backend/custom.rs b/apalis-core/src/backend/custom.rs index 718387a1..f05c66f5 100644 --- a/apalis-core/src/backend/custom.rs +++ b/apalis-core/src/backend/custom.rs @@ -298,7 +298,7 @@ impl BackendBuilder Result, BuildError> { - let mut db = self.database.ok_or(BuildError::MissingPool)?; + let mut db = self.database.ok_or(BuildError::MissingDb)?; let config = self.config.ok_or(BuildError::MissingConfig)?; let sink_fn = self.sink.ok_or(BuildError::MissingSink)?; let sink = sink_fn(&mut db, &config); @@ -322,7 +322,7 @@ impl BackendBuilder Backend for CustomBackend where @@ -345,9 +353,9 @@ where type Context = Ctx; - type Error = BoxDynError; + type Error = CustomBackendError; - type Stream = TaskStream, BoxDynError>; + type Stream = TaskStream, CustomBackendError>; type Beat = BoxStream<'static, Result<(), Self::Error>>; @@ -366,7 +374,7 @@ where .map(|task| match task { Ok(Some(t)) => Ok(Some(t)), Ok(None) => Ok(None), - Err(e) => Err(e.into()), + Err(e) => Err(e.into().into()), }) .boxed() } @@ -376,23 +384,36 @@ impl Sink> for CustomBackend where S: Sink>, + S::Error: Into, { - type Error = S::Error; + type Error = CustomBackendError; fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().current_sink.poll_ready_unpin(cx) + self.project() + .current_sink + .poll_ready_unpin(cx) + .map_err(|e| CustomBackendError::Inner(e.into())) } fn start_send(self: Pin<&mut Self>, item: Task) -> Result<(), Self::Error> { - self.project().current_sink.start_send_unpin(item) + self.project() + .current_sink + .start_send_unpin(item) + .map_err(|e| CustomBackendError::Inner(e.into())) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().current_sink.poll_flush_unpin(cx) + self.project() + .current_sink + 
.poll_flush_unpin(cx) + .map_err(|e| CustomBackendError::Inner(e.into())) } fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - self.project().current_sink.poll_close_unpin(cx) + self.project() + .current_sink + .poll_close_unpin(cx) + .map_err(|e| CustomBackendError::Inner(e.into())) } } @@ -426,8 +447,8 @@ mod tests { let item = db.pop_front(); drop(db); match item { - Some(item) => Some((Ok::<_, BoxDynError>(Some(item)), p)), - None => Some((Ok::<_, BoxDynError>(None), p)), + Some(item) => Some((Ok::<_, CustomBackendError>(Some(item)), p)), + None => Some((Ok::<_, CustomBackendError>(None), p)), } }) .boxed() @@ -438,7 +459,7 @@ mod tests { let mut db = p.lock().await; db.push_back(item); drop(db); - Ok::<_, BoxDynError>(p) + Ok::<_, CustomBackendError>(p) } .boxed() }) diff --git a/apalis-core/src/backend/impls/dequeue.rs b/apalis-core/src/backend/impls/dequeue.rs new file mode 100644 index 00000000..91d2da5d --- /dev/null +++ b/apalis-core/src/backend/impls/dequeue.rs @@ -0,0 +1,296 @@ +use std::{ + collections::VecDeque, + fmt, + fmt::Debug, + pin::Pin, + sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, + }, + time::Duration, +}; + +use futures_core::stream::BoxStream; +use futures_sink::Sink; +use futures_util::{ + FutureExt, StreamExt, TryStreamExt, + lock::Mutex, + sink::{self}, + stream, +}; +use tower_layer::Identity; + +use crate::{ + backend::{ + Backend, BackendExt, + codec::IdentityCodec, + custom::{BackendBuilder, CustomBackend}, + poll_strategy::{ + IntervalStrategy, MultiStrategy, PollContext, PollStrategyExt, StrategyBuilder, + }, + }, + error::BoxDynError, + task::{Task, task_id::RandomId}, + worker::context::WorkerContext, +}; + +/// Wrapper type for the shared +#[derive(Debug)] +pub struct InMemoryDb { + inner: Arc>>>, +} + +impl Clone for InMemoryDb { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + +impl InMemoryDb { + /// Create a new InMemoryDb instance + #[must_use] + pub fn new() -> Self { + Self { + inner: Arc::new(Mutex::new(VecDeque::new())), + } + } + + /// Consume the InMemoryDb and return the inner Arc> + #[must_use] + pub fn into_inner(self) -> Arc>>> { + self.inner + } + + /// Get a reference to the inner Arc> + #[must_use] + pub fn as_arc(&self) -> &Arc>>> { + &self.inner + } +} + +impl Default for InMemoryDb { + fn default() -> Self { + Self::new() + } +} + +/// Configuration for the in-memory VecDeque backend +#[derive(Debug, Clone)] +pub struct Config { + strategy: MultiStrategy, + prev_count: Arc, +} +/// Type alias for the boxed sink type +pub type BoxSink<'a, T> = Pin + Send + Sync + 'a>>; +/// Type alias for the sink type +pub type InMemorySink = BoxSink<'static, Task>; + +/// Type alias for the complete in-memory backend +pub struct VecDequeBackend( + CustomBackend< + T, + InMemoryDb, + BoxStream<'static, Result>, VecDequeError>>, + InMemorySink, + RandomId, + Config, + >, +); + +impl Debug for VecDequeBackend { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VecDequeBackend").finish() + } +} + +impl Clone for VecDequeBackend { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +/// Errors encountered while using the `VecDequeBackend` +#[derive(Debug, thiserror::Error, Clone)] +pub enum VecDequeError { + /// Error occurred during polling + #[error("Polling error: {0}")] + PollError(Arc), + /// Error occurred during sending + #[error("Sending error: {0}")] + SendError(Arc), +} + +impl Backend for VecDequeBackend +where + T: Send + 'static, +{ + type Args = 
T; + + type IdType = RandomId; + + type Context = (); + + type Stream = BoxStream<'static, Result>, VecDequeError>>; + + type Layer = Identity; + + type Beat = BoxStream<'static, Result<(), VecDequeError>>; + + type Error = VecDequeError; + + fn heartbeat(&self, worker: &WorkerContext) -> Self::Beat { + self.0 + .heartbeat(worker) + .map_err(|e| VecDequeError::PollError(Arc::new(e.into()))) + .boxed() + } + + fn middleware(&self) -> Self::Layer { + self.0.middleware() + } + fn poll(self, worker: &WorkerContext) -> Self::Stream { + self.0 + .poll(worker) + .map_err(|e| VecDequeError::PollError(Arc::new(e.into()))) + .boxed() + } +} + +impl BackendExt for VecDequeBackend +where + T: Send + 'static, +{ + type Codec = IdentityCodec; + type Compact = T; + type CompactStream = Self::Stream; + + fn get_queue(&self) -> crate::backend::queue::Queue { + std::any::type_name::().into() + } + + fn poll_compact(self, worker: &WorkerContext) -> Self::CompactStream { + self.0 + .poll(worker) + .map_err(|e| VecDequeError::PollError(Arc::new(e.into()))) + .boxed() + } +} + +impl Sink> for VecDequeBackend +where + T: Send + 'static, +{ + type Error = VecDequeError; + + fn poll_ready( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.get_mut().0) + .poll_ready(cx) + .map_err(|e| VecDequeError::SendError(Arc::new(e.into()))) + } + + fn start_send(self: Pin<&mut Self>, item: Task) -> Result<(), Self::Error> { + Pin::new(&mut self.get_mut().0) + .start_send(item) + .map_err(|e| VecDequeError::SendError(Arc::new(e.into()))) + } + + fn poll_flush( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.get_mut().0) + .poll_flush(cx) + .map_err(|e| VecDequeError::SendError(Arc::new(e.into()))) + } + + fn poll_close( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + Pin::new(&mut self.get_mut().0) + .poll_close(cx) + .map_err(|e| VecDequeError::SendError(Arc::new(e.into()))) + } +} + +/// Create an in-memory `VecDeque` backend with polling strategy for tasks of type T +#[must_use] +pub fn backend(poll_interval: Duration) -> VecDequeBackend +where + T: Send + 'static, +{ + let memory = InMemoryDb::new(); + + let strategy = StrategyBuilder::new() + .apply(IntervalStrategy::new(poll_interval)) + .build(); + + let config = Config { + strategy, + prev_count: Arc::new(AtomicUsize::new(1)), + }; + + let backend = BackendBuilder::new_with_cfg(config) + .database(memory) + .fetcher( + |db: &mut InMemoryDb, config: &Config, worker: &WorkerContext| { + let poll_strategy = config.strategy.clone(); + let poll_ctx = PollContext::new(worker.clone(), config.prev_count.clone()); + let poller = poll_strategy.build_stream(&poll_ctx); + stream::unfold( + (db.clone(), config.clone(), poller, worker.clone()), + |(p, config, mut poller, ctx)| async move { + poller.next().await; + let Some(mut db) = p.inner.try_lock() else { + return Some(( + Err::>, VecDequeError>( + VecDequeError::PollError(Arc::new( + "Failed to acquire lock".into(), + )), + ), + (p, config, poller, ctx), + )); + }; + let item = db.pop_front(); + drop(db); + if let Some(item) = item { + config.prev_count.store(1, Ordering::Relaxed); + Some((Ok::<_, VecDequeError>(Some(item)), (p, config, poller, ctx))) + } else { + config.prev_count.store(0, Ordering::Relaxed); + Some(( + Ok::>, VecDequeError>(None), + (p, config, poller, ctx), + )) + } + }, + ) + .boxed() + }, + ) + .sink(|db, _| { + Box::pin(sink::unfold(db.clone(), move |p, 
item| { + async move { + let Some(mut db) = p.inner.try_lock() else { + return Err(VecDequeError::PollError(Arc::new( + "Failed to acquire lock".into(), + ))); + }; + db.push_back(item); + drop(db); + Ok::<_, VecDequeError>(p) + } + .boxed() + .shared() + })) as _ + }) + .build() + .unwrap(); + + VecDequeBackend(backend) +} diff --git a/apalis-core/src/backend/impls/memory.rs b/apalis-core/src/backend/impls/memory.rs index 2d6f0277..fc99d2a6 100644 --- a/apalis-core/src/backend/impls/memory.rs +++ b/apalis-core/src/backend/impls/memory.rs @@ -63,6 +63,9 @@ use std::{ }; use tower_layer::Identity; +/// A boxed in-memory task receiver stream +pub type BoxedReceiver = Pin> + Send>>; + /// In-memory queue that is based on channels /// /// @@ -106,7 +109,7 @@ use tower_layer::Identity; }] pub struct MemoryStorage { pub(super) sender: MemorySink, - pub(super) receiver: Pin> + Send>>, + pub(super) receiver: BoxedReceiver, } impl Default for MemoryStorage { @@ -133,6 +136,14 @@ impl MemoryStorage { } } +impl MemoryStorage { + /// Create a storage given a sender and receiver + #[must_use] + pub fn new_with(sender: MemorySink, receiver: BoxedReceiver) -> Self { + Self { sender, receiver } + } +} + impl Sink> for MemoryStorage { type Error = SendError; @@ -156,7 +167,7 @@ impl Sink> for MemoryStorage { } } -type MemorySinkInner = Arc< +type ArcMemorySink = Arc< futures_util::lock::Mutex< Box, Error = SendError> + Send + Sync + Unpin + 'static>, >, @@ -164,7 +175,14 @@ type MemorySinkInner = Arc< /// Memory sink for sending tasks to the in-memory backend pub struct MemorySink { - pub(super) inner: MemorySinkInner, + pub(super) inner: ArcMemorySink, +} + +impl MemorySink { + /// Build a new memory sink given a sink + pub fn new(sink: ArcMemorySink) -> Self { + Self { inner: sink } + } } impl std::fmt::Debug for MemorySink { @@ -260,6 +278,10 @@ impl BackendExt for Memory type Compact = Args; type CompactStream = TaskStream, Self::Error>; + fn get_queue(&self) -> crate::backend::queue::Queue { + std::any::type_name::().into() + } + fn poll_compact(self, _worker: &WorkerContext) -> Self::CompactStream { (self.receiver.map(|task| Ok(Some(task))).boxed()) as _ } diff --git a/apalis-core/src/backend/impls/mod.rs b/apalis-core/src/backend/impls/mod.rs index ced3d7fe..b79c4343 100644 --- a/apalis-core/src/backend/impls/mod.rs +++ b/apalis-core/src/backend/impls/mod.rs @@ -1,6 +1,4 @@ +pub(super) mod dequeue; /// A guide to using the implementing a backend pub mod guide; pub(super) mod memory; - -#[cfg(feature = "json")] -pub(super) mod json; diff --git a/apalis-core/src/backend/mod.rs b/apalis-core/src/backend/mod.rs index 314c1ef3..227ed174 100644 --- a/apalis-core/src/backend/mod.rs +++ b/apalis-core/src/backend/mod.rs @@ -23,7 +23,7 @@ use std::{future::Future, time::Duration}; use futures_util::{Stream, stream::BoxStream}; use crate::{ - backend::codec::Codec, + backend::{codec::Codec, queue::Queue}, error::BoxDynError, task::{Task, status::Status, task_id::TaskId}, worker::context::WorkerContext, @@ -36,7 +36,6 @@ pub mod poll_strategy; pub mod queue; pub mod shared; -mod config; mod expose; mod impls; mod sink; @@ -46,17 +45,14 @@ pub use sink::*; pub use impls::guide; -pub use config::ConfigExt; - /// In-memory backend based on channels pub mod memory { pub use crate::backend::impls::memory::*; } -/// File based Backend using JSON -#[cfg(feature = "json")] -pub mod json { - pub use crate::backend::impls::json::*; +/// In-memory dequeue backend +pub mod dequeue { + pub use 
crate::backend::impls::dequeue::*; } /// The `Backend` trait defines how workers get and manage tasks from a backend. @@ -99,6 +95,9 @@ pub trait BackendExt: Backend { Item = Result>, Self::Error>, >; + /// Returns the queue associated with the backend. + fn get_queue(&self) -> Queue; + /// Polls the backend for encoded tasks for the given worker. fn poll_compact(self, worker: &WorkerContext) -> Self::CompactStream; } @@ -167,15 +166,18 @@ pub trait RegisterWorker: Backend { /// Represents the result of a task execution #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Debug, Clone)] -pub struct TaskResult { - task_id: TaskId, - status: Status, - result: Result, +pub struct TaskResult { + /// The unique identifier of the task + pub task_id: TaskId, + /// The status of the task + pub status: Status, + /// The result of the task execution + pub result: Result, } -impl TaskResult { +impl TaskResult { /// Create a new TaskResult - pub fn new(task_id: TaskId, status: Status, result: Result) -> Self { + pub fn new(task_id: TaskId, status: Status, result: Result) -> Self { Self { task_id, status, @@ -183,7 +185,7 @@ impl TaskResult { } /// Get the ID of the task - pub fn task_id(&self) -> &TaskId { + pub fn task_id(&self) -> &TaskId { &self.task_id } @@ -206,7 +208,9 @@ impl TaskResult { /// Allows waiting for tasks to complete and checking their status pub trait WaitForCompletion: Backend { /// The result stream type yielding task results - type ResultStream: Stream, Self::Error>> + Send + 'static; + type ResultStream: Stream, Self::Error>> + + Send + + 'static; /// Wait for multiple tasks to complete, yielding results as they become available fn wait_for( @@ -223,5 +227,5 @@ pub trait WaitForCompletion: Backend { fn check_status( &self, task_ids: impl IntoIterator> + Send, - ) -> impl Future>, Self::Error>> + Send; + ) -> impl Future>, Self::Error>> + Send; } diff --git a/apalis-core/src/backend/pipe.rs b/apalis-core/src/backend/pipe.rs index 8ea3cd25..c0b425f0 100644 --- a/apalis-core/src/backend/pipe.rs +++ b/apalis-core/src/backend/pipe.rs @@ -9,8 +9,7 @@ //! //! ```rust //! # use futures_util::stream; -//! # use apalis_core::backend::pipe::PipeExt; -//! # use apalis_core::backend::json::JsonStorage; +//! # use apalis_core::backend::{pipe::PipeExt, dequeue}; //! # use apalis_core::worker::{builder::WorkerBuilder, context::WorkerContext}; //! # use apalis_core::error::BoxDynError; //! # use std::time::Duration; @@ -20,7 +19,7 @@ //! async fn main() { //! let stm = stream::iter(0..10).map(|s| Ok::<_, std::io::Error>(s)); //! -//! let in_memory = JsonStorage::new_temp().unwrap(); +//! let in_memory = dequeue::backend::<u32>(Duration::from_secs(1)); //! let backend = stm.pipe_to(in_memory); //! //! 
async fn task(task: u32, ctx: WorkerContext) -> Result<(), BoxDynError> { @@ -225,14 +224,13 @@ pub enum PipeError { } #[cfg(test)] -#[cfg(feature = "json")] mod tests { use std::{io, time::Duration}; use futures_util::stream; use crate::{ - backend::json::JsonStorage, + backend::dequeue, error::BoxDynError, worker::{ builder::WorkerBuilder, context::WorkerContext, ext::event_listener::EventListenerExt, @@ -246,7 +244,7 @@ mod tests { #[tokio::test] async fn basic_worker() { let stm = stream::iter(0..ITEMS).map(Ok::<_, io::Error>); - let in_memory = JsonStorage::new_temp().unwrap(); + let in_memory = dequeue::backend::<u32>(Duration::from_secs(1)); let backend = Pipe::new(stm, in_memory); diff --git a/apalis-core/src/backend/poll_strategy/mod.rs b/apalis-core/src/backend/poll_strategy/mod.rs index 3f9b9dc3..bb6cecbe 100644 --- a/apalis-core/src/backend/poll_strategy/mod.rs +++ b/apalis-core/src/backend/poll_strategy/mod.rs @@ -117,69 +117,12 @@ mod tests { const ITEMS: u32 = 10; - type InMemoryQueue = Arc>>>; - #[tokio::test] #[cfg(feature = "sleep")] async fn basic_strategy_backend() { - use crate::backend::custom::BackendBuilder; - - let memory: InMemoryQueue = Arc::new(Mutex::new(VecDeque::new())); - - #[derive(Clone)] - struct Config { - strategy: MultiStrategy, - prev_count: Arc<AtomicUsize>, - } - let strategy = StrategyBuilder::new() - .apply(IntervalStrategy::new(Duration::from_millis(100))) - .build(); - - let config = Config { - strategy, - prev_count: Arc::new(AtomicUsize::new(1)), - }; + use crate::backend::impls::dequeue::backend; - let mut backend = BackendBuilder::new_with_cfg(config) - .database(memory) - .fetcher(|db, config, worker| { - let poll_strategy = config.strategy.clone(); - let poll_ctx = PollContext::new(worker.clone(), config.prev_count.clone()); - let poller = poll_strategy.build_stream(&poll_ctx); - stream::unfold( - (db.clone(), config.clone(), poller, worker.clone()), - |(p, config, mut poller, ctx)| async move { - let _ = poller.next().await; - let mut db = p.lock().await; - let item = db.pop_front(); - drop(db); - if let Some(item) = item { - config.prev_count.store(1, Ordering::Relaxed); - Some((Ok::<_, BoxDynError>(Some(item)), (p, config, poller, ctx))) - } else { - config.prev_count.store(0, Ordering::Relaxed); - Some(( - Ok::>, BoxDynError>(None), - (p, config, poller, ctx), - )) - } - }, - ) - .boxed() - }) - .sink(|db, _| { - sink::unfold(db.clone(), move |p, item| { - async move { - let mut db = p.lock().await; - db.push_back(item); - drop(db); - Ok::<_, BoxDynError>(p) - } - .boxed() - }) - }) - .build() - .unwrap(); + let mut backend = backend(Duration::from_secs(1)); for i in 0..ITEMS { backend.send(Task::new(i)).await.unwrap(); @@ -209,7 +152,7 @@ async fn custom_strategy_backend() { use crate::backend::custom::BackendBuilder; - let memory: InMemoryQueue = Arc::new(Mutex::new(VecDeque::new())); + let memory = Arc::new(Mutex::new(VecDeque::new())); #[derive(Clone)] struct Config { diff --git a/apalis-core/src/lib.rs b/apalis-core/src/lib.rs index 046ecb9d..19e5cea1 100644 --- a/apalis-core/src/lib.rs +++ b/apalis-core/src/lib.rs @@ -168,15 +168,14 @@ //! ```rust //! # use apalis_core::monitor::Monitor; //! # use apalis_core::worker::builder::WorkerBuilder; -//! # use apalis_core::backend::json::JsonStorage; //! # use apalis_core::task::Task; -//! # use apalis_core::backend::TaskSink; +//! # use apalis_core::backend::{TaskSink, dequeue}; //! # use tower::service_fn; //! # use std::time::Duration; //! 
# use apalis_core::worker::context::WorkerContext; //! #[tokio::main] //! async fn main() { -//! let mut storage = JsonStorage::new_temp().unwrap(); +//! let mut storage = dequeue::backend::(Duration::from_secs(1)); //! storage.push(1u32).await.unwrap(); //! //! let monitor = Monitor::new() diff --git a/apalis-core/src/monitor/mod.rs b/apalis-core/src/monitor/mod.rs index 6105b682..7e8f4e37 100644 --- a/apalis-core/src/monitor/mod.rs +++ b/apalis-core/src/monitor/mod.rs @@ -577,11 +577,10 @@ impl std::fmt::Display for ExitError { } #[cfg(test)] -#[cfg(feature = "json")] mod tests { use super::*; use crate::{ - backend::{TaskSink, json::JsonStorage}, + backend::{TaskSink, dequeue::backend}, task::task_id::TaskId, worker::context::WorkerContext, }; @@ -595,7 +594,7 @@ mod tests { #[tokio::test] async fn basic_with_workers() { - let mut backend = JsonStorage::new_temp().unwrap(); + let mut backend = backend(Duration::from_millis(100)); for i in 0..10 { backend.push(i).await.unwrap(); @@ -605,7 +604,7 @@ mod tests { let monitor = monitor.register(move |index| { WorkerBuilder::new(format!("rango-tango-{index}")) .backend(backend.clone()) - .build(move |r: u32, id: TaskId, w: WorkerContext| async move { + .build(move |r: u32, id: TaskId<_>, w: WorkerContext| async move { println!("{id:?}, {}", w.name()); tokio::time::sleep(Duration::from_secs(index as u64)).await; Ok::<_, io::Error>(r) @@ -620,10 +619,7 @@ mod tests { } #[tokio::test] async fn test_monitor_run() { - let mut backend = JsonStorage::new( - "/var/folders/h_/sd1_gb5x73bbcxz38dts7pj80000gp/T/apalis-json-store-girmm9e36pz", - ) - .unwrap(); + let mut backend = backend(Duration::from_millis(100)); for i in 0..10 { backend.push(i).await.unwrap(); @@ -669,10 +665,16 @@ mod tests { #[tokio::test] async fn test_monitor_register_multiple() { - let mut backend = JsonStorage::new_temp().unwrap(); + let mut int_backend = backend(Duration::from_millis(500)); + + let mut str_backend = backend(Duration::from_millis(500)); for i in 0..10 { - backend.push(i).await.unwrap(); + int_backend.push(i).await.unwrap(); + } + + for i in 0..10 { + str_backend.push(i.to_string()).await.unwrap(); } let monitor: Monitor = Monitor::new(); @@ -682,14 +684,13 @@ mod tests { let monitor = monitor.on_event(|wrk, e| { println!("{:?}, {e:?}", wrk.name()); }); - let b = backend.clone(); let monitor = monitor .register(move |index| { WorkerBuilder::new(format!("worker0-{index}")) - .backend(backend.clone()) + .backend(int_backend.clone()) .layer(ConcurrencyLimitLayer::new(1)) .build( - move |request: i32, id: TaskId, w: WorkerContext| async move { + move |request: i32, id: TaskId<_>, w: WorkerContext| async move { println!("{id:?}, {}", w.name()); tokio::time::sleep(Duration::from_secs(index as u64)).await; Ok::<_, io::Error>(request) @@ -698,10 +699,10 @@ mod tests { }) .register(move |index| { WorkerBuilder::new(format!("worker1-{index}")) - .backend(b.clone()) + .backend(str_backend.clone()) .layer(ConcurrencyLimitLayer::new(1)) .build( - move |request: i32, id: TaskId, w: WorkerContext| async move { + move |request: String, id: TaskId<_>, w: WorkerContext| async move { println!("{id:?}, {}", w.name()); tokio::time::sleep(Duration::from_secs(index as u64)).await; Ok::<_, io::Error>(request) diff --git a/apalis-core/src/task/extensions.rs b/apalis-core/src/task/extensions.rs index f8a060f4..fa826ee6 100644 --- a/apalis-core/src/task/extensions.rs +++ b/apalis-core/src/task/extensions.rs @@ -204,7 +204,7 @@ impl Extensions { #[inline] #[must_use] pub fn is_empty(&self) 
-> bool { - self.map.as_ref().map_or(true, |map| map.is_empty()) + self.map.as_ref().is_none_or(|map| map.is_empty()) } /// Get the number of extensions available. diff --git a/apalis-core/src/task/metadata.rs b/apalis-core/src/task/metadata.rs index b4860ef4..333729a1 100644 --- a/apalis-core/src/task/metadata.rs +++ b/apalis-core/src/task/metadata.rs @@ -76,17 +76,6 @@ mod tests { } } - #[cfg(feature = "json")] - impl MetadataExt for SampleStore { - type Error = Infallible; - fn extract(&self) -> Result { - unimplemented!() - } - fn inject(&mut self, _: T) -> Result<(), Self::Error> { - unimplemented!() - } - } - impl Service> for ExampleService where diff --git a/apalis-core/src/task/status.rs b/apalis-core/src/task/status.rs index bb89c175..adfdf826 100644 --- a/apalis-core/src/task/status.rs +++ b/apalis-core/src/task/status.rs @@ -20,9 +20,10 @@ use std::{ #[repr(u8)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[non_exhaustive] -#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] pub enum Status { /// Task is pending + #[default] Pending, /// Task is queued for execution, but no worker has picked it up Queued, @@ -36,12 +37,6 @@ pub enum Status { Killed, } -impl Default for Status { - fn default() -> Self { - Self::Pending - } -} - /// Errors that can occur when parsing a `Status` from a string #[derive(Debug, thiserror::Error)] pub enum StatusError { diff --git a/apalis-core/src/task/task_id.rs b/apalis-core/src/task/task_id.rs index fcf39538..a367f034 100644 --- a/apalis-core/src/task/task_id.rs +++ b/apalis-core/src/task/task_id.rs @@ -18,7 +18,7 @@ pub use random_id::RandomId; /// A wrapper type that defines a task id. #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Debug, Clone, Copy, Eq, Hash, PartialEq, PartialOrd, Ord)] -pub struct TaskId(IdType); +pub struct TaskId(IdType); impl TaskId { /// Generate a new [`TaskId`] diff --git a/apalis-core/src/task_fn/guide.rs b/apalis-core/src/task_fn/guide.rs index 00af0eb0..d20b71c0 100644 --- a/apalis-core/src/task_fn/guide.rs +++ b/apalis-core/src/task_fn/guide.rs @@ -73,11 +73,11 @@ //! //! Example: //! ```rust -//! # use apalis_core::task::{attempt::Attempt, data::Data, task_id::TaskId}; +//! # use apalis_core::task::{attempt::Attempt, data::Data, task_id::TaskId, task_id::RandomId }; //! #[derive(Clone)] //! struct State; //! -//! async fn process_task(_: u32, attempt: Attempt, state: Data, id: TaskId) -> String { +//! async fn process_task(_: u32, attempt: Attempt, state: Data, id: TaskId) -> String { //! format!("Attempt {} for task {} with state", attempt.current(), id) //! } //! 
``` diff --git a/apalis-core/src/worker/context.rs b/apalis-core/src/worker/context.rs index 44246c50..467585e2 100644 --- a/apalis-core/src/worker/context.rs +++ b/apalis-core/src/worker/context.rs @@ -296,7 +296,7 @@ impl WorkerContext { if let Ok(mut waker_guard) = self.waker.lock() { if waker_guard .as_ref() - .map_or(true, |stored_waker| !stored_waker.will_wake(cx.waker())) + .is_none_or(|stored_waker| !stored_waker.will_wake(cx.waker())) { *waker_guard = Some(cx.waker().clone()); } diff --git a/apalis-core/src/worker/ext/circuit_breaker/service.rs b/apalis-core/src/worker/ext/circuit_breaker/service.rs index 7029556e..5878412a 100644 --- a/apalis-core/src/worker/ext/circuit_breaker/service.rs +++ b/apalis-core/src/worker/ext/circuit_breaker/service.rs @@ -80,9 +80,8 @@ impl CircuitBreakerService { } fn should_allow_request(&self) -> bool { - let mut stats = match self.stats.write() { - Ok(stats) => stats, - Err(_) => return false, // If poisoned, reject request + let Ok(mut stats) = self.stats.write() else { + return false; // If poisoned, reject request }; match stats.state { @@ -113,11 +112,9 @@ impl CircuitBreakerService { /// Record a successful request pub fn record_success(&self) { - let mut stats = match self.stats.write() { - Ok(stats) => stats, - Err(_) => return, // If poisoned, skip recording + let Ok(mut stats) = self.stats.write() else { + return; // If poisoned, skip recording }; - stats.success_count += 1; if stats.state == CircuitState::HalfOpen { @@ -135,9 +132,8 @@ impl CircuitBreakerService { /// Record a failure and potentially open the circuit pub fn record_failure(&self) { - let mut stats = match self.stats.write() { - Ok(stats) => stats, - Err(_) => return, // If poisoned, skip recording + let Ok(mut stats) = self.stats.write() else { + return; // If poisoned, skip recording }; stats.failure_count += 1; @@ -158,9 +154,8 @@ impl CircuitBreakerService { } fn can_make_request(&self) -> bool { - let stats = match self.stats.read() { - Ok(stats) => stats, - Err(_) => return false, // If poisoned, reject request + let Ok(stats) = self.stats.read() else { + return false; // If poisoned, reject request }; match stats.state { diff --git a/apalis-core/src/worker/mod.rs b/apalis-core/src/worker/mod.rs index 616ce492..804cc1b9 100644 --- a/apalis-core/src/worker/mod.rs +++ b/apalis-core/src/worker/mod.rs @@ -605,7 +605,6 @@ where } #[cfg(test)] -#[cfg(feature = "json")] mod tests { use std::{ future::ready, @@ -618,7 +617,7 @@ mod tests { use futures_core::future::BoxFuture; use crate::{ - backend::{TaskSink, json::JsonStorage, memory::MemoryStorage}, + backend::{TaskSink, memory::MemoryStorage}, task::Parts, worker::{ builder::WorkerBuilder, @@ -637,9 +636,9 @@ mod tests { #[tokio::test] async fn basic_worker_run() { - let mut json_store = JsonStorage::new_temp().unwrap(); + let mut in_memory = MemoryStorage::new(); for i in 0..ITEMS { - json_store.push(i.into()).await.unwrap(); + in_memory.push(i.into()).await.unwrap(); } #[derive(Clone, Debug, Default)] @@ -684,7 +683,7 @@ mod tests { } let worker = WorkerBuilder::new("rango-tango") - .backend(json_store) + .backend(in_memory) .data(Count::default()) .break_circuit() .long_running() diff --git a/apalis-sql/Cargo.toml b/apalis-sql/Cargo.toml index 2b29ce00..73fa3cca 100644 --- a/apalis-sql/Cargo.toml +++ b/apalis-sql/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-sql" -version = "1.0.0-beta.2" +version = "1.0.0-rc.1" authors = ["Njuguna Mureithi "] edition.workspace = true repository.workspace = true @@ -13,9 +13,9 
@@ keywords = ["job", "task", "scheduler", "worker", "sql"] categories = ["database", "asynchronous"] [dependencies] -apalis-core = { path = "../apalis-core", version = "1.0.0-beta.2", default-features = false, features = [ +apalis-core = { path = "../apalis-core", version = "1.0.0-rc.1", default-features = false, features = [ "sleep", - "json", + "serde", ] } serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/apalis-workflow/Cargo.toml b/apalis-workflow/Cargo.toml index 00cc6c01..b8fa5508 100644 --- a/apalis-workflow/Cargo.toml +++ b/apalis-workflow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis-workflow" -version = "0.1.0-beta.2" +version = "0.1.0-rc.1" edition.workspace = true repository.workspace = true authors = ["Njuguna Mureithi "] @@ -16,14 +16,16 @@ tracing = ["dep:tracing", "apalis-core/tracing"] ulid = ["dep:ulid"] ## Enable uuid support for task ids uuid = ["dep:uuid"] +## Enable rand support for task ids using integers +rand = ["dep:rand"] [dependencies] petgraph = { version = "0.8.2", features = ["serde-1"] } serde = { version = "1.0", features = ["derive"] } tower = { version = "0.5", features = ["util"], default-features = false } -apalis-core = { path = "../apalis-core", version = "1.0.0-beta.2", default-features = false, features = [ +apalis-core = { path = "../apalis-core", version = "1.0.0-rc.1", default-features = false, features = [ "sleep", - "json", + "serde", ] } futures = "0.3.30" thiserror = "2.0.0" @@ -31,9 +33,11 @@ thiserror = "2.0.0" tracing = { version = "0.1.41", default-features = false, optional = true } ulid = { version = "1", optional = true } uuid = { version = "1", features = ["v4"], optional = true } +rand = { version = "0.9", features = ["std"], optional = true } [dev-dependencies] tokio = { version = "1.37.0", features = ["macros", "rt-multi-thread", "sync"] } +apalis-file-storage = { path = "../utils/apalis-file-storage" } serde_json = "1" [lints] diff --git a/apalis-workflow/README.md b/apalis-workflow/README.md index d7984cbb..e53953d4 100644 --- a/apalis-workflow/README.md +++ b/apalis-workflow/README.md @@ -22,7 +22,7 @@ Workflows are built by composing steps, and can be executed using supported back ```rust,ignore use apalis::prelude::*; use apalis_workflow::*; -use apalis_core::backend::json::JsonStorage; +use apalis_file_storage::JsonStorage;; #[tokio::main] async fn main() { @@ -55,7 +55,7 @@ You can track your workflows using [apalis-board](https://github.com/apalis-dev/ ## Backend Support -- [x] [JSONStorage](https://docs.rs/apalis-core/1.0.0-beta.2/apalis_core/backend/json/struct.JsonStorage.html) +- [x] [JSONStorage](https://docs.rs/apalis-core/1.0.0-rc.1/apalis_core/backend/json/struct.JsonStorage.html) - [x] [SqliteStorage](https://docs.rs/apalis-sqlite#workflow-example) - [x] [RedisStorage](https://docs.rs/apalis-redis#workflow-example) - [x] [PostgresStorage](https://docs.rs/apalis-postgres#workflow-example) diff --git a/apalis-workflow/src/dag/mod.rs b/apalis-workflow/src/dag/mod.rs index d478b9fd..fce0aaca 100644 --- a/apalis-workflow/src/dag/mod.rs +++ b/apalis-workflow/src/dag/mod.rs @@ -280,8 +280,7 @@ mod tests { }; use apalis_core::{ - backend::json::JsonStorage, error::BoxDynError, task::Task, task_fn::task_fn, - worker::context::WorkerContext, + error::BoxDynError, task::Task, task_fn::task_fn, worker::context::WorkerContext, }; use petgraph::graph::NodeIndex; use serde_json::Value; diff --git a/apalis-workflow/src/id_generator.rs b/apalis-workflow/src/id_generator.rs index 
7a349246..cdbaf8e0 100644 --- a/apalis-workflow/src/id_generator.rs +++ b/apalis-workflow/src/id_generator.rs @@ -25,3 +25,30 @@ impl GenerateId for RandomId { Self::default() } } + +#[cfg(feature = "rand")] +impl GenerateId for u64 { + fn generate() -> Self { + rand::random::<u64>() + } +} +#[cfg(feature = "rand")] +impl GenerateId for i64 { + fn generate() -> Self { + rand::random::<i64>() + } +} + +#[cfg(feature = "rand")] +impl GenerateId for u128 { + fn generate() -> Self { + rand::random::<u128>() + } +} + +#[cfg(feature = "rand")] +impl GenerateId for i128 { + fn generate() -> Self { + rand::random::<i128>() + } +} diff --git a/apalis-workflow/src/lib.rs b/apalis-workflow/src/lib.rs index ba467fd5..b73d14c8 100644 --- a/apalis-workflow/src/lib.rs +++ b/apalis-workflow/src/lib.rs @@ -49,7 +49,6 @@ mod tests { use std::{collections::HashMap, time::Duration}; use apalis_core::{ - backend::json::JsonStorage, task::{builder::TaskBuilder, task_id::TaskId}, task_fn::task_fn, worker::{ @@ -57,6 +56,7 @@ ext::event_listener::EventListenerExt, }, }; + use apalis_file_storage::JsonStorage; use futures::SinkExt; use serde_json::Value; @@ -75,7 +75,7 @@ .add_step(AndThen::new(task_fn(async |input: usize| { Ok::<_, BoxDynError>(input.to_string()) }))) - .and_then(async |input: String, _task_id: TaskId| input.parse::<usize>()) + .and_then(async |input: String, _task_id: TaskId<_>| input.parse::<usize>()) .and_then(async |res: usize| { Ok::<_, BoxDynError>((0..res).enumerate().collect::<HashMap<_, _>>()) }) @@ -99,15 +99,15 @@ println!("Completed with {res:?}"); }); - let mut in_memory: JsonStorage<Value> = JsonStorage::new_temp().unwrap(); + let mut backend: JsonStorage<Value> = JsonStorage::new_temp().unwrap(); - in_memory + backend .send(TaskBuilder::new(Value::from(17)).build()) .await .unwrap(); let worker = WorkerBuilder::new("rango-tango") - .backend(in_memory) + .backend(backend) .on_event(|ctx, ev| { println!("On Event = {ev:?}"); if matches!(ev, Event::Error(_)) { diff --git a/apalis/Cargo.toml b/apalis/Cargo.toml index 7e30f496..0512e30e 100644 --- a/apalis/Cargo.toml +++ b/apalis/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "apalis" -version = "1.0.0-beta.2" +version = "1.0.0-rc.1" authors = ["Geoffrey Mureithi "] description = "Simple, extensible and multithreaded background task processing for Rust" edition.workspace = true @@ -59,7 +59,7 @@ full = [ docsrs = ["document-features"] [dependencies.apalis-core] -version = "1.0.0-beta.2" +version = "1.0.0-rc.1" default-features = false path = "../apalis-core" @@ -86,10 +86,11 @@ version = "0.1.40" optional = true [dev-dependencies] -apalis-core = { path = "../apalis-core", version = "1.0.0-beta.2" } +apalis-core = { path = "../apalis-core", version = "1.0.0-rc.1" } serde = "1" tokio = { version = "1", features = ["full"] } apalis-workflow = { path = "../apalis-workflow", version = "0.1.0-alpha.6" } # For README +apalis-file-storage = { path = "../utils/apalis-file-storage" } # For README [package.metadata.docs.rs] # defines the configuration attribute `docsrs` diff --git a/apalis/README.md b/apalis/README.md index b2635c42..3cb6fe98 100644 --- a/apalis/README.md +++ b/apalis/README.md @@ -32,7 +32,7 @@ ## Features -- **Simple and predictable task handling** - [Task handlers](https://docs.rs/apalis-core/1.0.0-beta.2/apalis_core/task_fn/guide/index.html) are just async functions with a macro-free API +- **Simple and predictable task handling** - [Task handlers](https://docs.rs/apalis-core/1.0.0-rc.1/apalis_core/task_fn/guide/index.html) are just async functions with a 
macro-free API - **Robust task execution** - Built-in support for retries, timeouts, and error handling - **Multiple storage backends** - Support for Redis, PostgreSQL, SQLite, and in-memory storage - **Advanced task management** - Task prioritization, scheduling, metadata, and result tracking @@ -64,7 +64,7 @@ To get started, just add to Cargo.toml ```toml [dependencies] -apalis = { version = "1.0.0-beta.2" } +apalis = { version = "1.0.0-rc.1" } # apalis-redis = { version = "1.0.0-alpha.1" } # Use redis/sqlite/postgres etc ``` @@ -114,7 +114,7 @@ async fn main() { use apalis::prelude::*; use apalis_workflow::*; use std::time::Duration; -use apalis_core::backend::json::JsonStorage; +use apalis_file_storage::JsonStorage; #[tokio::main] async fn main() { @@ -143,18 +143,6 @@ async fn main() { For more functionality like `fold`, `filter_map` and other combinators checkout the [docs](https://docs.rs/apalis-workflow) -## Feature flags - -- _full_ - All the available features -- _tracing_ (enabled by default) — Support Tracing 👀 -- _sentry_ — Support for Sentry exception and performance monitoring -- _prometheus_ — Support Prometheus metrics -- _retry_ — Support direct retrying tasks -- _timeout_ — Support timeouts on tasks -- _limit_ — Support for concurrency and rate-limiting -- _filter_ — Support filtering tasks based on a predicate -- _catch-panic_ - Catch panics that occur during execution - ## How apalis works Here is a basic example of how the core parts integrate diff --git a/apalis/src/lib.rs b/apalis/src/lib.rs index 0e61f497..a55a6bc6 100644 --- a/apalis/src/lib.rs +++ b/apalis/src/lib.rs @@ -42,7 +42,10 @@ pub mod prelude { task::data::{AddExtension, Data}, task::extensions::Extensions, task::metadata::MetadataExt, + task::status::Status, + task::task_id::RandomId, task::task_id::TaskId, + task::task_id::TaskIdError, task_fn::{FromRequest, IntoResponse, TaskFn, task_fn}, worker::builder::*, worker::ext::{ diff --git a/deny.toml b/deny.toml new file mode 100644 index 00000000..bd776cf3 --- /dev/null +++ b/deny.toml @@ -0,0 +1,235 @@ +# This template contains all of the possible sections and their default values + +# Note that all fields that take a lint level have these possible values: +# * deny - An error will be produced and the check will fail +# * warn - A warning will be produced, but the check will not fail +# * allow - No warning or error will be produced, though in some cases a note +# will be + +# The values provided in this template are the default values that will be used +# when any section or field is not specified in your own configuration + +# Root options + +# The graph table configures how the dependency graph is constructed and thus +# which crates the checks are performed against +[graph] +# If 1 or more target triples (and optionally, target_features) are specified, +# only the specified targets will be checked when running `cargo deny check`. +# This means, if a particular package is only ever used as a target specific +# dependency, such as, for example, the `nix` crate only being used via the +# `target_family = "unix"` configuration, that only having windows targets in +# this list would mean the nix crate, as well as any of its exclusive +# dependencies not shared by any other crates, would be ignored, as the target +# list here is effectively saying which targets you are building for. 
+targets = [ + # The triple can be any string, but only the target triples built in to + # rustc (as of 1.40) can be checked against actual config expressions + #"x86_64-unknown-linux-musl", + # You can also specify which target_features you promise are enabled for a + # particular target. target_features are currently not validated against + # the actual valid features supported by the target architecture. + #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, +] +# When creating the dependency graph used as the source of truth when checks are +# executed, this field can be used to prune crates from the graph, removing them +# from the view of cargo-deny. This is an extremely heavy hammer, as if a crate +# is pruned from the graph, all of its dependencies will also be pruned unless +# they are connected to another crate in the graph that hasn't been pruned, +# so it should be used with care. The identifiers are [Package ID Specifications] +# (https://doc.rust-lang.org/cargo/reference/pkgid-spec.html) +#exclude = [] +# If true, metadata will be collected with `--all-features`. Note that this can't +# be toggled off if true, if you want to conditionally enable `--all-features` it +# is recommended to pass `--all-features` on the cmd line instead +all-features = false +# If true, metadata will be collected with `--no-default-features`. The same +# caveat with `all-features` applies +no-default-features = false +# If set, these feature will be enabled when collecting metadata. If `--features` +# is specified on the cmd line they will take precedence over this option. +#features = [] + +# The output table provides options for how/if diagnostics are outputted +[output] +# When outputting inclusion graphs in diagnostics that include features, this +# option can be used to specify the depth at which feature edges will be added. +# This option is included since the graphs can be quite large and the addition +# of features from the crate(s) to all of the graph roots can be far too verbose. +# This option can be overridden via `--feature-depth` on the cmd line +feature-depth = 1 + +# This section is considered when running `cargo deny check advisories` +# More documentation for the advisories section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html +[advisories] +# The path where the advisory databases are cloned/fetched into +#db-path = "$CARGO_HOME/advisory-dbs" +# The url(s) of the advisory databases to use +#db-urls = ["https://github.com/rustsec/advisory-db"] +# A list of advisory IDs to ignore. Note that ignored advisories will still +# output a note when they are encountered. +ignore = [ + #"RUSTSEC-0000-0000", + #{ id = "RUSTSEC-0000-0000", reason = "you can specify a reason the advisory is ignored" }, + #"a-crate-that-is-yanked@0.1.1", # you can also ignore yanked crate versions if you wish + #{ crate = "a-crate-that-is-yanked@0.1.1", reason = "you can specify why you are ignoring the yanked crate" }, +] +# If this is true, then cargo deny will use the git executable to fetch advisory database. +# If this is false, then it uses a built-in git library. +# Setting this to true can be helpful if you have special authentication requirements that cargo-deny does not support. +# See Git Authentication for more information about setting up git authentication. 
+#git-fetch-with-cli = true + +# This section is considered when running `cargo deny check licenses` +# More documentation for the licenses section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html +[licenses] +# List of explicitly allowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.11 short identifier (+ optional exception)]. +allow = [ + #"MIT", + #"Apache-2.0", + #"Apache-2.0 WITH LLVM-exception", +] +# The confidence threshold for detecting a license from license text. +# The higher the value, the more closely the license text must be to the +# canonical license text of a valid SPDX license file. +# [possible values: any between 0.0 and 1.0]. +confidence-threshold = 0.8 +# Allow 1 or more licenses on a per-crate basis, so that particular licenses +# aren't accepted for every possible crate as with the normal allow list +exceptions = [ + # Each entry is the crate and version constraint, and its specific allow + # list + #{ allow = ["Zlib"], crate = "adler32" }, +] + +# Some crates don't have (easily) machine readable licensing information, +# adding a clarification entry for it allows you to manually specify the +# licensing information +#[[licenses.clarify]] +# The package spec the clarification applies to +#crate = "ring" +# The SPDX expression for the license requirements of the crate +#expression = "MIT AND ISC AND OpenSSL" +# One or more files in the crate's source used as the "source of truth" for +# the license expression. If the contents match, the clarification will be used +# when running the license check, otherwise the clarification will be ignored +# and the crate will be checked normally, which may produce warnings or errors +# depending on the rest of your configuration +#license-files = [ +# Each entry is a crate relative path, and the (opaque) hash of its contents +#{ path = "LICENSE", hash = 0xbd0eed23 } +#] + +[licenses.private] +# If true, ignores workspace crates that aren't published, or are only +# published to private registries. +# To see how to mark a crate as unpublished (to the official registry), +# visit https://doc.rust-lang.org/cargo/reference/manifest.html#the-publish-field. +ignore = false +# One or more private registries that you might publish crates to, if a crate +# is only published to private registries, and ignore is true, the crate will +# not have its license(s) checked +registries = [ + #"https://sekretz.com/registry +] + +# This section is considered when running `cargo deny check bans`. +# More documentation about the 'bans' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html +[bans] +# Lint level for when multiple versions of the same crate are detected +multiple-versions = "warn" +# Lint level for when a crate version requirement is `*` +wildcards = "allow" +# The graph highlighting used when creating dotgraphs for crates +# with multiple versions +# * lowest-version - The path to the lowest versioned duplicate is highlighted +# * simplest-path - The path to the version with the fewest edges is highlighted +# * all - Both lowest-version and simplest-path are used +highlight = "all" +# The default lint level for `default` features for crates that are members of +# the workspace that is being checked. This can be overridden by allowing/denying +# `default` on a crate-by-crate basis if desired. 
+workspace-default-features = "allow" +# The default lint level for `default` features for external crates that are not +# members of the workspace. This can be overridden by allowing/denying `default` +# on a crate-by-crate basis if desired. +external-default-features = "allow" +# List of crates that are allowed. Use with care! +allow = [ + #"ansi_term@0.11.0", + #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is allowed" }, +] +# List of crates to deny +deny = [ + #"ansi_term@0.11.0", + #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason it is banned" }, + # Wrapper crates can optionally be specified to allow the crate when it + # is a direct dependency of the otherwise banned crate + #{ crate = "ansi_term@0.11.0", wrappers = ["this-crate-directly-depends-on-ansi_term"] }, +] + +# List of features to allow/deny +# Each entry the name of a crate and a version range. If version is +# not specified, all versions will be matched. +#[[bans.features]] +#crate = "reqwest" +# Features to not allow +#deny = ["json"] +# Features to allow +#allow = [ +# "rustls", +# "__rustls", +# "__tls", +# "hyper-rustls", +# "rustls", +# "rustls-pemfile", +# "rustls-tls-webpki-roots", +# "tokio-rustls", +# "webpki-roots", +#] +# If true, the allowed features must exactly match the enabled feature set. If +# this is set there is no point setting `deny` +#exact = true + +# Certain crates/versions that will be skipped when doing duplicate detection. +skip = [ + #"ansi_term@0.11.0", + #{ crate = "ansi_term@0.11.0", reason = "you can specify a reason why it can't be updated/removed" }, +] +# Similarly to `skip` allows you to skip certain crates during duplicate +# detection. Unlike skip, it also includes the entire tree of transitive +# dependencies starting at the specified crate, up to a certain depth, which is +# by default infinite. +skip-tree = [ + #"ansi_term@0.11.0", # will be skipped along with _all_ of its direct and transitive dependencies + #{ crate = "ansi_term@0.11.0", depth = 20 }, +] + +# This section is considered when running `cargo deny check sources`. +# More documentation about the 'sources' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html +[sources] +# Lint level for what to happen when a crate from a crate registry that is not +# in the allow list is encountered +unknown-registry = "warn" +# Lint level for what to happen when a crate from a git repository that is not +# in the allow list is encountered +unknown-git = "warn" +# List of URLs for allowed crate registries. Defaults to the crates.io index +# if not specified. If it is specified but empty, no registries are allowed. 
+allow-registry = ["https://github.com/rust-lang/crates.io-index"] +# List of URLs for allowed Git repositories +allow-git = [] + +[sources.allow-org] +# github.com organizations to allow git sources for +github = [] +# gitlab.com organizations to allow git sources for +gitlab = [] +# bitbucket.org organizations to allow git sources for +bitbucket = [] diff --git a/examples/basics/Cargo.toml b/examples/basics/Cargo.toml index 7f816a3c..1a6ce6fe 100644 --- a/examples/basics/Cargo.toml +++ b/examples/basics/Cargo.toml @@ -9,7 +9,7 @@ license = "MIT OR Apache-2.0" thiserror = "2.0.0" tokio = { version = "1", features = ["full"] } apalis = { path = "../../apalis", features = ["limit", "catch-panic", "tracing"] } -apalis-core = { path = "../../apalis-core" , features = ["json"]} +apalis-core = { path = "../../apalis-core" , features = ["serde"]} serde = "1" serde_json = "1" tracing-subscriber = "0.3.20" diff --git a/examples/basics/src/main.rs b/examples/basics/src/main.rs index 2d2c47b7..6992b4fb 100644 --- a/examples/basics/src/main.rs +++ b/examples/basics/src/main.rs @@ -6,7 +6,6 @@ use std::time::Duration; use apalis::{layers::catch_panic::CatchPanicLayer, prelude::*}; -use apalis_core::backend::json::JsonStorage; use email_service::Email; use layer::LogLayer; @@ -14,7 +13,7 @@ use tracing::{Instrument, Span, log::info}; use crate::{cache::ValidEmailCache, expensive_client::EmailService}; -async fn produce_jobs(storage: &mut JsonStorage) { +async fn produce_jobs(storage: &mut MemoryStorage) { for i in 0..5 { storage .push(Email { @@ -82,40 +81,32 @@ async fn main() -> Result<(), std::io::Error> { tracing_subscriber::fmt::init(); - let mut backend = JsonStorage::new_temp().unwrap(); + let mut backend = MemoryStorage::new(); produce_jobs(&mut backend).await; - Monitor::new() - .register(move |_runs: usize| { - WorkerBuilder::new("tasty-banana") - .backend(backend.clone()) - .enable_tracing() - // This handles any panics that may occur in any of the layers below - // .catch_panic() - // Or just to customize (do not include both) - .layer(CatchPanicLayer::with_panic_handler(|e| { - let panic_info = if let Some(s) = e.downcast_ref::<&str>() { - s.to_string() - } else if let Some(s) = e.downcast_ref::() { - s.clone() - } else { - "Unknown panic".to_string() - }; - // Abort tells the backend to kill job - AbortError::new(PanicError::Panic(panic_info)) - })) - .layer(LogLayer::new("some-log-example")) - // Add shared context to all jobs executed by this worker - .data(EmailService::new()) - .data(ValidEmailCache::new()) - .build(send_email) - }) - .should_restart(|_ctx, last_err, _current_run| { - !matches!(last_err, WorkerError::GracefulExit) // Don't restart on graceful exit - }) - .shutdown_timeout(Duration::from_secs(5)) - // Use .run() if you don't want without signals - .run_with_signal(tokio::signal::ctrl_c()) // This will wait for ctrl+c then gracefully shutdown + WorkerBuilder::new("tasty-banana") + .backend(backend) + .enable_tracing() + // This handles any panics that may occur in any of the layers below + // .catch_panic() + // Or just to customize (do not include both) + .layer(CatchPanicLayer::with_panic_handler(|e| { + let panic_info = if let Some(s) = e.downcast_ref::<&str>() { + s.to_string() + } else if let Some(s) = e.downcast_ref::() { + s.clone() + } else { + "Unknown panic".to_string() + }; + // Abort tells the backend to kill job + AbortError::new(PanicError::Panic(panic_info)) + })) + .layer(LogLayer::new("some-log-example")) + // Add shared context to all jobs executed by 
this worker + .data(EmailService::new()) + .data(ValidEmailCache::new()) + .build(send_email) + .run_until(tokio::signal::ctrl_c()) // This will wait for ctrl+c then gracefully shutdown .await .unwrap(); Ok(()) diff --git a/examples/fn-args/src/main.rs b/examples/fn-args/src/main.rs index 20a96db2..f5bd6d8e 100644 --- a/examples/fn-args/src/main.rs +++ b/examples/fn-args/src/main.rs @@ -17,7 +17,7 @@ struct SimpleJob {} async fn simple_job( _: SimpleJob, // Required, must be of the type of the job/message and the first argument worker: WorkerContext, // The worker and its context, added by worker - task_id: TaskId, // The task id, added by storage + task_id: TaskId, // The task id, added by storage attempt: Attempt, // The current attempt count: Data, // Our custom data added via layer ) { diff --git a/examples/graceful-shutdown/Cargo.toml b/examples/graceful-shutdown/Cargo.toml index 8d6c3a82..703cfe2f 100644 --- a/examples/graceful-shutdown/Cargo.toml +++ b/examples/graceful-shutdown/Cargo.toml @@ -8,7 +8,7 @@ repository.workspace = true thiserror = "2.0.0" tokio = { version = "1", features = ["full"] } apalis = { path = "../../apalis", features = ["limit", "catch-panic"] } -apalis-core = { path = "../../apalis-core", features = ["json"] } +apalis-core = { path = "../../apalis-core", features = ["serde"] } serde = "1" tracing-subscriber = "0.3.20" futures = "0.3" diff --git a/examples/graceful-shutdown/src/main.rs b/examples/graceful-shutdown/src/main.rs index c06d1559..44301f4b 100644 --- a/examples/graceful-shutdown/src/main.rs +++ b/examples/graceful-shutdown/src/main.rs @@ -1,11 +1,10 @@ use std::time::Duration; use apalis::prelude::*; -use apalis_core::backend::json::JsonStorage; use serde::{Deserialize, Serialize}; use tracing::info; -#[derive(Debug, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize)] struct LongRunningJob {} async fn long_running_task(_task: LongRunningJob, worker: WorkerContext) { @@ -22,29 +21,25 @@ async fn long_running_task(_task: LongRunningJob, worker: WorkerContext) { info!("Shutdown complete!"); } -async fn produce_jobs(storage: &mut JsonStorage) { +async fn produce_jobs(storage: &mut MemoryStorage) { storage.push(LongRunningJob {}).await.unwrap(); } #[tokio::main] -async fn main() -> Result<(), MonitorError> { +async fn main() -> Result<(), WorkerError> { unsafe { std::env::set_var("RUST_LOG", "debug"); } tracing_subscriber::fmt::init(); - let mut backend = JsonStorage::new_temp().unwrap(); + let mut backend = MemoryStorage::new(); produce_jobs(&mut backend).await; - Monitor::new() - .register(move |_runs| { - WorkerBuilder::new("tasty-banana") - .backend(backend.clone()) - .enable_tracing() - .concurrency(2) - .on_event(|_c, e| info!("{e}")) - .build(long_running_task) - }) - .shutdown_timeout(Duration::from_secs(10)) - .run_with_signal(tokio::signal::ctrl_c()) + WorkerBuilder::new("tasty-banana") + .backend(backend) + .enable_tracing() + .concurrency(2) + .on_event(|_c, e| info!("{e}")) + .build(long_running_task) + .run_until(tokio::signal::ctrl_c()) .await?; Ok(()) } diff --git a/examples/prometheus/Cargo.toml b/examples/prometheus/Cargo.toml index a4a7db1b..1c091dc7 100644 --- a/examples/prometheus/Cargo.toml +++ b/examples/prometheus/Cargo.toml @@ -12,7 +12,8 @@ tracing = "0.1.41" tracing-subscriber = { version = "0.3.20", features = ["env-filter"] } serde = { version = "1.0", features = ["derive"] } apalis = { path = "../../apalis", features = ["prometheus"] } -apalis-core = { path = "../../apalis-core", features = ["json"] } 
+apalis-core = { path = "../../apalis-core", features = ["serde"] } futures = "0.3" metrics-exporter-prometheus = "0.17" email-service = { path = "../email-service" } +apalis-file-storage = { path = "../../utils/apalis-file-storage" } diff --git a/examples/prometheus/src/main.rs b/examples/prometheus/src/main.rs index 390f22ca..7c002ba9 100644 --- a/examples/prometheus/src/main.rs +++ b/examples/prometheus/src/main.rs @@ -6,7 +6,7 @@ use anyhow::Result; use apalis::layers::prometheus::PrometheusLayer; use apalis::prelude::*; -use apalis_core::backend::json::JsonStorage; +use apalis_file_storage::JsonStorage; use axum::{ extract::Form, http::StatusCode, diff --git a/examples/workflow/Cargo.toml b/examples/workflow/Cargo.toml index 46287e47..ba4dc320 100644 --- a/examples/workflow/Cargo.toml +++ b/examples/workflow/Cargo.toml @@ -9,7 +9,8 @@ tower = { version = "0.5", features = ["util"] } tokio = { version = "1", features = ["full"] } apalis = { path = "../../apalis", features = ["limit", "catch-panic", "retry"] } apalis-workflow = { path = "../../apalis-workflow" } -apalis-core = { path = "../../apalis-core", features = ["json"] } +apalis-core = { path = "../../apalis-core", features = ["serde"] } +apalis-file-storage = { path = "../../utils/apalis-file-storage" } serde = { version = "1", features = ["derive"] } serde_json = "1" tracing-subscriber = "0.3.20" diff --git a/examples/workflow/src/main.rs b/examples/workflow/src/main.rs index 749bf57e..8edb06e1 100644 --- a/examples/workflow/src/main.rs +++ b/examples/workflow/src/main.rs @@ -1,7 +1,7 @@ use std::{fmt::Debug, time::Duration}; use apalis::prelude::*; -use apalis_core::backend::json::JsonStorage; +use apalis_file_storage::JsonStorage; use apalis_workflow::{Workflow, WorkflowSink}; use serde::{Deserialize, Serialize}; use tracing::info; diff --git a/supply-chain/config.toml b/supply-chain/config.toml index 783d28f8..832194fa 100644 --- a/supply-chain/config.toml +++ b/supply-chain/config.toml @@ -4,9 +4,15 @@ [cargo-vet] version = "0.10" +[policy.apalis] +audit-as-crates-io = true + [policy.apalis-core] audit-as-crates-io = true +[policy.apalis-sql] +audit-as-crates-io = true + [policy.apalis-workflow] audit-as-crates-io = true @@ -47,11 +53,11 @@ version = "1.0.100" criteria = "safe-to-deploy" [[exemptions.apalis-core]] -version = "1.0.0-beta.2" +version = "1.0.0-rc.1" criteria = "safe-to-deploy" [[exemptions.apalis-workflow]] -version = "0.1.0-beta.2" +version = "0.1.0-rc.1" criteria = "safe-to-deploy" [[exemptions.arrayvec]] diff --git a/utils/apalis-codec/Cargo.toml b/utils/apalis-codec/Cargo.toml new file mode 100644 index 00000000..c76f1ee5 --- /dev/null +++ b/utils/apalis-codec/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "apalis-codec" +version = "0.1.0-rc.1" +rust-version.workspace = true +edition.workspace = true +repository.workspace = true + +[features] +default = ["json"] +## Enable serde_json support +json = ["apalis-core/serde", "dep:serde", "dep:serde_json"] +bincode = ["dep:bincode"] +msgpack = ["dep:rmp-serde", "dep:serde", "apalis-core/serde", "dep:thiserror"] + +[dependencies] +apalis-core = { path = "../../apalis-core", version = "1.0.0-rc.1", default-features = false } +serde = { version = "1.0", features = ["derive"], optional = true } +serde_json = { version = "1", optional = true } +bincode = { version = "2.0.1", optional = true } +rmp-serde = { version = "1.3.0", optional = true } +thiserror = { version = "2.0.0", optional = true } + +[lints] +workspace = true diff --git a/utils/apalis-codec/README.md 
b/utils/apalis-codec/README.md new file mode 100644 index 00000000..1d63d18d --- /dev/null +++ b/utils/apalis-codec/README.md @@ -0,0 +1,28 @@ +# apalis-codec + +A codec utility crate for apalis backends + +## Overview + +`apalis-codec` provides serialization and deserialization utilities for encoding and decoding job payloads in apalis. +It handles the conversion of task data to and from various formats for reliable storage and transmission. + +## Features + +- Multiple codec support (JSON, MessagePack, etc.) +- Type-safe serialization +- Error handling for codec operations +- Integration with any apalis backend that supports codecs, via `BackendExt` + +## Usage + +Add this to your `Cargo.toml`: + +```toml +[dependencies] +apalis-codec = { version = "0.1", features = ["msgpack"] } +``` + +## License + +Licensed under the same terms as the apalis project. diff --git a/utils/apalis-codec/src/bincode.rs b/utils/apalis-codec/src/bincode.rs new file mode 100644 index 00000000..9b66eac9 --- /dev/null +++ b/utils/apalis-codec/src/bincode.rs @@ -0,0 +1,54 @@ +use apalis_core::backend::codec::Codec; +use bincode::{Decode, Encode}; + +/// Bincode encoding and decoding +#[derive(Debug, Clone, Default)] +pub struct BincodeCodec; + +/// Errors that can occur during Bincode encoding/decoding +#[derive(thiserror::Error, Debug)] +pub enum BincodeCodecError { + /// Error during encoding + #[error("Encoding error: {0}")] + EncodeError(#[from] bincode::error::EncodeError), + /// Error during decoding + #[error("Decoding error: {0}")] + DecodeError(#[from] bincode::error::DecodeError), +} + +impl<T: Encode + Decode<()>> Codec<T> for BincodeCodec { + type Compact = Vec<u8>; + type Error = BincodeCodecError; + fn encode(input: &T) -> Result<Vec<u8>, Self::Error> { + let config = bincode::config::standard(); + Ok(bincode::encode_to_vec(input, config)?) 
+ } + + fn decode(compact: &Vec<u8>) -> Result<T, Self::Error> { + let config = bincode::config::standard(); + Ok(bincode::decode_from_slice(compact, config)?.0) + } +} +#[cfg(test)] +mod tests { + use super::*; + + #[derive(Encode, Decode, Debug, PartialEq)] + struct TestData { + id: u32, + name: String, + } + + #[test] + fn test_encode_decode_roundtrip() { + let original = TestData { + id: 42, + name: "test".to_string(), + }; + + let encoded = BincodeCodec::encode(&original).expect("encoding failed"); + let decoded: TestData = BincodeCodec::decode(&encoded).expect("decoding failed"); + + assert_eq!(original, decoded); + } +} diff --git a/utils/apalis-codec/src/json.rs b/utils/apalis-codec/src/json.rs new file mode 100644 index 00000000..792ed7f5 --- /dev/null +++ b/utils/apalis-codec/src/json.rs @@ -0,0 +1,90 @@ +use std::marker::PhantomData; + +use apalis_core::backend::codec::Codec; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +/// Json encoding and decoding +#[derive(Debug, Clone, Default)] +pub struct JsonCodec<Output> { + _o: PhantomData<Output>, +} + +impl<T: Serialize + for<'de> Deserialize<'de>> Codec<T> for JsonCodec<Vec<u8>> { + type Compact = Vec<u8>; + type Error = serde_json::Error; + fn encode(input: &T) -> Result<Vec<u8>, Self::Error> { + serde_json::to_vec(input) + } + + fn decode(compact: &Vec<u8>) -> Result<T, Self::Error> { + serde_json::from_slice(compact) + } +} + +impl<T: Serialize + for<'de> Deserialize<'de>> Codec<T> for JsonCodec<String> { + type Compact = String; + type Error = serde_json::Error; + fn encode(input: &T) -> Result<String, Self::Error> { + serde_json::to_string(input) + } + fn decode(compact: &String) -> Result<T, Self::Error> { + serde_json::from_str(compact) + } +} + +impl<T: Serialize + for<'de> Deserialize<'de>> Codec<T> for JsonCodec<Value> { + type Compact = Value; + type Error = serde_json::Error; + fn encode(input: &T) -> Result<Value, Self::Error> { + serde_json::to_value(input) + } + + fn decode(compact: &Value) -> Result<T, Self::Error> { + T::deserialize(compact) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(Serialize, Deserialize, Debug, PartialEq)] + struct TestStruct { + id: u32, + name: String, + } + + #[test] + fn test_json_codec_vec_u8_roundtrip() { + let original = TestStruct { + id: 1, + name: "Test".to_string(), + }; + let encoded = JsonCodec::<Vec<u8>>::encode(&original).unwrap(); + let decoded: TestStruct = JsonCodec::<Vec<u8>>::decode(&encoded).unwrap(); + assert_eq!(original, decoded); + } + + #[test] + fn test_json_codec_string_roundtrip() { + let original = TestStruct { + id: 2, + name: "Example".to_string(), + }; + let encoded = JsonCodec::<String>::encode(&original).unwrap(); + let decoded: TestStruct = JsonCodec::<String>::decode(&encoded).unwrap(); + assert_eq!(original, decoded); + } + + #[test] + fn test_json_codec_value_roundtrip() { + let original = TestStruct { + id: 3, + name: "Sample".to_string(), + }; + let encoded = JsonCodec::<Value>::encode(&original).unwrap(); + let decoded: TestStruct = JsonCodec::<Value>::decode(&encoded).unwrap(); + assert_eq!(original, decoded); + } +} diff --git a/utils/apalis-codec/src/lib.rs b/utils/apalis-codec/src/lib.rs new file mode 100644 index 00000000..64a10b35 --- /dev/null +++ b/utils/apalis-codec/src/lib.rs @@ -0,0 +1,15 @@ +//! Utility codecs for apalis +//! +//! Supports different encoding and decoding strategies for task arguments and results. 
+ +/// Encoding for tasks using json +#[cfg(feature = "json")] +pub mod json; + +/// Encoding for tasks using MessagePack +#[cfg(feature = "msgpack")] +pub mod msgpack; + +/// Encoding for tasks using bincode +#[cfg(feature = "bincode")] +pub mod bincode; diff --git a/utils/apalis-codec/src/msgpack.rs b/utils/apalis-codec/src/msgpack.rs new file mode 100644 index 00000000..d7f6435e --- /dev/null +++ b/utils/apalis-codec/src/msgpack.rs @@ -0,0 +1,56 @@ +use apalis_core::backend::codec::Codec; +use serde::{Deserialize, Serialize}; + +/// MsgPack encoding and decoding +#[derive(Debug, Clone, Default)] +pub struct MsgPackCodec; + +/// Errors that can occur during MsgPack encoding/decoding +#[derive(thiserror::Error, Debug)] +pub enum MsgPackCodecError { + /// Error during encoding + #[error("Encoding error: {0}")] + EncodeError(#[from] rmp_serde::encode::Error), + /// Error during decoding + #[error("Decoding error: {0}")] + DecodeError(#[from] rmp_serde::decode::Error), +} + +impl<T: Serialize + for<'de> Deserialize<'de>> Codec<T> for MsgPackCodec { + type Compact = Vec<u8>; + type Error = MsgPackCodecError; + fn encode(input: &T) -> Result<Vec<u8>, Self::Error> { + Ok(rmp_serde::to_vec(input)?) + } + + fn decode(compact: &Vec<u8>) -> Result<T, Self::Error> { + Ok(rmp_serde::from_slice(compact)?) + } +} +#[cfg(test)] +mod tests { + use super::*; + + #[derive(Serialize, Deserialize, Debug, PartialEq)] + struct TestStruct { + id: u32, + name: String, + } + + #[test] + fn test_encode_decode_roundtrip() { + let original = TestStruct { + id: 1, + name: "Test".to_string(), + }; + + // Encode the original struct + let encoded = MsgPackCodec::encode(&original).expect("Encoding failed"); + + // Decode back to struct + let decoded: TestStruct = MsgPackCodec::decode(&encoded).expect("Decoding failed"); + + // Assert that the original and decoded structs are equal + assert_eq!(original, decoded); + } +} diff --git a/utils/apalis-file-storage/Cargo.toml b/utils/apalis-file-storage/Cargo.toml new file mode 100644 index 00000000..cd5b0818 --- /dev/null +++ b/utils/apalis-file-storage/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "apalis-file-storage" +version = "0.1.0-rc.1" +rust-version.workspace = true +edition.workspace = true +repository.workspace = true +authors = ["Njuguna Mureithi "] +license = "MIT" +description = "A test-friendly, single-process, file-based storage backend for apalis" +publish = true + +[lib] + +[dependencies] +serde = { version = "1.0.228", features = ["derive"] } +apalis-core = { path = "../../apalis-core", version = "1.0.0-rc.1", default-features = false, features = [ +    "sleep", +    "serde", +] } +serde_json = "1" +futures-channel = { version = "0.3.30", features = [ +    "sink", +    "std", +], default-features = false } +futures-sink = { version = "0.3.30", default-features = false } +futures-util = { version = "0.3.30", features = [ +    "sink", +    "async-await", +    "async-await-macro", +    "std", +], default-features = false } +futures-core = { version = "0.3.30", default-features = false } +apalis-codec = { path = "../apalis-codec", version = "0.1.0-rc.1", default-features = false, features = [ +    "json", +] } + +[dev-dependencies] +tokio = { version = "1.37.0", features = ["full"] } +apalis-workflow = { path = "../../apalis-workflow", version = "0.1.0-rc.1" } # For json backend tests + +[lints] +workspace = true diff --git a/utils/apalis-file-storage/README.md b/utils/apalis-file-storage/README.md new file mode 100644 index 00000000..e69de29b diff --git a/apalis-core/src/backend/impls/json/backend.rs b/utils/apalis-file-storage/src/backend.rs 
similarity index 76% rename from apalis-core/src/backend/impls/json/backend.rs rename to utils/apalis-file-storage/src/backend.rs index f7d33925..ba9d3394 100644 --- a/apalis-core/src/backend/impls/json/backend.rs +++ b/utils/apalis-file-storage/src/backend.rs @@ -3,30 +3,27 @@ use std::{ task::{Context, Poll}, }; +use apalis_codec::json::JsonCodec; use futures_channel::mpsc::SendError; use futures_core::{Stream, stream::BoxStream}; use futures_util::{StreamExt, TryStreamExt, stream}; -use serde::{Serialize, de::DeserializeOwned}; +use serde::{Serialize, de::Deserialize}; use serde_json::Value; -use crate::{ - backend::{ - Backend, BackendExt, ConfigExt, TaskStream, - codec::json::JsonCodec, - impls::json::{ - JsonStorage, - meta::JsonMapMetadata, - util::{FindFirstWith, JsonAck}, - }, - queue::Queue, - }, +use apalis_core::{ + backend::{Backend, BackendExt, TaskStream, queue::Queue}, task::{Task, status::Status, task_id::RandomId}, worker::{context::WorkerContext, ext::ack::AcknowledgeLayer}, }; +use crate::{ + JsonMapMetadata, JsonStorage, + util::{FindFirstWith, JsonAck}, +}; + impl Backend for JsonStorage where - Args: 'static + Send + Serialize + DeserializeOwned + Unpin, + Args: 'static + Send + Serialize + for<'de> Deserialize<'de> + Unpin, { type Args = Args; type IdType = RandomId; @@ -49,12 +46,18 @@ where } } -impl BackendExt for JsonStorage { +impl Deserialize<'de> + Unpin> BackendExt + for JsonStorage +{ type Codec = JsonCodec<Value>; type Compact = Value; type CompactStream = TaskStream, SendError>; + fn get_queue(&self) -> Queue { + std::any::type_name::<Args>().into() + } + fn poll_compact(self, worker: &WorkerContext) -> Self::CompactStream { self.poll(worker) .map_ok(|c| { @@ -64,15 +67,7 @@ impl BackendExt for } } -impl ConfigExt for JsonStorage -where - Args: 'static + Send + Serialize + DeserializeOwned + Unpin, -{ - fn get_queue(&self) -> Queue { - Queue::from(std::any::type_name::<Args>()) - } -} -impl Stream for JsonStorage { +impl Deserialize<'de> + Unpin> Stream for JsonStorage { type Item = Task; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { @@ -80,7 +75,7 @@ impl Stream for JsonStorage { if let Some((key, task)) = map.find_first_with(|s, _| { s.queue == std::any::type_name::<Args>() && s.status == Status::Pending }) { - use crate::task::builder::TaskBuilder; + use apalis_core::task::builder::TaskBuilder; let key = key.clone(); let args = Args::deserialize(&task.args).unwrap(); let task = TaskBuilder::new(args) diff --git a/apalis-core/src/backend/impls/json/mod.rs b/utils/apalis-file-storage/src/lib.rs similarity index 96% rename from apalis-core/src/backend/impls/json/mod.rs rename to utils/apalis-file-storage/src/lib.rs index 28047dff..dddaffc2 100644 --- a/apalis-core/src/backend/impls/json/mod.rs +++ b/utils/apalis-file-storage/src/lib.rs @@ -13,7 +13,7 @@ //! ## Usage Example //! //! ```rust -//! # use apalis_core::backend::json::JsonStorage; +//! # use apalis_file_storage::JsonStorage; //! # use apalis_core::worker::builder::WorkerBuilder; //! # use std::time::Duration; //! 
# use apalis_core::worker::context::WorkerContext; @@ -59,11 +59,8 @@ use std::{ sync::{Arc, RwLock}, }; -use self::{ - meta::JsonMapMetadata, - util::{TaskKey, TaskWithMeta}, -}; -use crate::{ +use self::util::{TaskKey, TaskWithMeta}; +use apalis_core::{ features_table, task::{ Task, @@ -80,6 +77,7 @@ mod sink; mod util; pub use self::shared::SharedJsonStore; +pub use meta::JsonMapMetadata; /// A backend that persists to a file using json encoding /// /// *Warning*: This backend is not optimized for high-throughput scenarios and is best suited for development, testing, or low-volume workloads. /// /// # Example /// /// Creates a temporary JSON storage backend /// ```rust -/// # use apalis_core::backend::json::JsonStorage; +/// # use apalis_file_storage::JsonStorage; /// # pub fn setup_json_storage() -> JsonStorage { /// let mut backend = JsonStorage::new_temp().unwrap(); /// # backend @@ -97,7 +95,7 @@ pub use self::shared::SharedJsonStore; #[doc = features_table! { setup = r#" # { - # use apalis_core::backend::json::JsonStorage; + # use apalis_file_storage::JsonStorage; # let mut backend = JsonStorage::new_temp().unwrap(); # backend # }; @@ -127,10 +125,9 @@ pub struct JsonStorage { _marker: std::marker::PhantomData, } -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Debug, Clone)] +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] struct StorageEntry { - task_id: TaskId, + task_id: TaskId, status: Status, task: TaskWithMeta, } @@ -323,10 +320,11 @@ impl Clone for JsonStorage { #[cfg(test)] mod tests { + use super::*; use std::time::Duration; - use crate::{ - backend::{TaskSink, json::JsonStorage}, + use apalis_core::{ + backend::TaskSink, error::BoxDynError, worker::{ builder::WorkerBuilder, context::WorkerContext, ext::event_listener::EventListenerExt, diff --git a/apalis-core/src/backend/impls/json/meta.rs b/utils/apalis-file-storage/src/meta.rs similarity index 55% rename from apalis-core/src/backend/impls/json/meta.rs rename to utils/apalis-file-storage/src/meta.rs index 3ea9df13..c9c4bb2c 100644 --- a/apalis-core/src/backend/impls/json/meta.rs +++ b/utils/apalis-file-storage/src/meta.rs @@ -1,17 +1,16 @@ -use serde::{Serialize, de::DeserializeOwned}; +use apalis_core::task::metadata::MetadataExt; +use serde::{Deserialize, Serialize}; -pub(super) type JsonMapMetadata = serde_json::Map<String, serde_json::Value>; +/// A simple wrapper around a JSON map to represent task metadata +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] +pub struct JsonMapMetadata(serde_json::Map<String, serde_json::Value>); -impl crate::task::metadata::MetadataExt for JsonMapMetadata -where - T: Serialize + DeserializeOwned, -{ +impl<T: Serialize + for<'de> Deserialize<'de>> MetadataExt<T> for JsonMapMetadata { type Error = serde_json::Error; - fn extract(&self) -> Result<T, Self::Error> { use serde::de::Error as _; let key = std::any::type_name::<T>(); - match self.get(key) { + match self.0.get(key) { Some(value) => T::deserialize(value), None => Err(serde_json::Error::custom(format!( "No entry for type `{key}` in metadata" ))), } } fn inject(&mut self, value: T) -> Result<(), serde_json::Error> { let key = std::any::type_name::<T>(); let json_value = serde_json::to_value(value)?; - self.insert(key.to_owned(), json_value); + self.0.insert(key.to_owned(), json_value); Ok(()) } } diff --git a/apalis-core/src/backend/impls/json/shared.rs b/utils/apalis-file-storage/src/shared.rs similarity index 87% rename from apalis-core/src/backend/impls/json/shared.rs rename to 
utils/apalis-file-storage/src/shared.rs index 6e1d30a3..3262cbf0 100644 --- a/apalis-core/src/backend/impls/json/shared.rs +++ b/utils/apalis-file-storage/src/shared.rs @@ -7,12 +7,12 @@ /// /// # Example /// -/// ```rust +/// ```rust,no_run /// # use apalis_core::backend::shared::MakeShared; /// # use apalis_core::task::Task; /// # use apalis_core::worker::context::WorkerContext; /// # use apalis_core::worker::builder::WorkerBuilder; -/// # use apalis_core::backend::json::SharedJsonStore; +/// # use apalis_file_storage::SharedJsonStore; /// # use apalis_core::error::BoxDynError; /// # use std::time::Duration; /// # use apalis_core::backend::TaskSink; @@ -52,18 +52,11 @@ use futures_channel::mpsc::SendError; use futures_core::{Stream, stream::BoxStream}; use futures_sink::Sink; use futures_util::{SinkExt, StreamExt}; -use serde::{Serialize, de::DeserializeOwned}; +use serde::{Deserialize, Serialize, de::DeserializeOwned}; use serde_json::Value; -use crate::{ - backend::impls::{ - json::{ - JsonStorage, - meta::JsonMapMetadata, - util::{FindFirstWith, TaskKey, TaskWithMeta}, - }, - memory::{MemorySink, MemoryStorage}, - }, +use apalis_core::{ + backend::memory::{MemorySink, MemoryStorage}, task::{ Task, status::Status, @@ -71,6 +64,11 @@ use crate::{ }, }; +use crate::{ + JsonMapMetadata, JsonStorage, + util::{FindFirstWith, TaskKey, TaskWithMeta}, +}; + #[derive(Debug)] struct SharedJsonStream { inner: JsonStorage, @@ -80,15 +78,14 @@ struct SharedJsonStream { impl Stream for SharedJsonStream { type Item = Task; fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll> { - use crate::task::builder::TaskBuilder; + use apalis_core::task::builder::TaskBuilder; let map = self.inner.tasks.try_read().expect("Failed to read tasks"); if let Some((key, _)) = map.find_first_with(|k, _| { k.queue == std::any::type_name::() && k.status == Status::Pending }) { let task = map.get(key).unwrap(); - let args = match Args::deserialize(&task.args) { - Ok(value) => value, - Err(_) => return Poll::Pending, + let Ok(args) = Args::deserialize(&task.args) else { + return Poll::Pending; }; let task = TaskBuilder::new(args) .with_task_id(key.task_id.clone()) @@ -134,8 +131,8 @@ impl SharedJsonStore { } } -impl - crate::backend::shared::MakeShared for SharedJsonStore +impl Deserialize<'de> + Unpin + 'static> + apalis_core::backend::shared::MakeShared for SharedJsonStore { type Backend = MemoryStorage; @@ -148,12 +145,8 @@ impl _: Self::Config, ) -> Result { let (sender, receiver) = self.inner.create_channel::(); - Ok(MemoryStorage { - sender: MemorySink { - inner: Arc::new(futures_util::lock::Mutex::new(sender)), - }, - receiver, - }) + let sender = MemorySink::new(Arc::new(futures_util::lock::Mutex::new(sender))); + Ok(MemoryStorage::new_with(sender, receiver)) } } @@ -166,7 +159,7 @@ type BoxSink = Box< >; impl JsonStorage { - fn create_channel( + fn create_channel Deserialize<'de> + Serialize + Send + Unpin>( &self, ) -> ( BoxSink, @@ -180,7 +173,7 @@ impl JsonStorage { let store = self.clone(); sender.with_flat_map(move |task: Task| { - use crate::task::task_id::RandomId; + use apalis_core::task::task_id::RandomId; let task_id = task .parts .task_id @@ -231,10 +224,10 @@ impl JsonStorage { mod tests { use std::time::Duration; - use crate::error::BoxDynError; + use apalis_core::error::BoxDynError; - use crate::worker::context::WorkerContext; - use crate::{ + use apalis_core::worker::context::WorkerContext; + use apalis_core::{ backend::{TaskSink, shared::MakeShared}, worker::{builder::WorkerBuilder, 
ext::event_listener::EventListenerExt}, }; diff --git a/apalis-core/src/backend/impls/json/sink.rs b/utils/apalis-file-storage/src/sink.rs similarity index 76% rename from apalis-core/src/backend/impls/json/sink.rs rename to utils/apalis-file-storage/src/sink.rs index 77099be0..4f3e20db 100644 --- a/apalis-core/src/backend/impls/json/sink.rs +++ b/utils/apalis-file-storage/src/sink.rs @@ -5,23 +5,21 @@ use std::{ use futures_channel::mpsc::SendError; use futures_sink::Sink; -use serde::{Serialize, de::DeserializeOwned}; +use serde::{Serialize, de::Deserialize}; use serde_json::Value; +use apalis_core::task::{ + Task, + task_id::{RandomId, TaskId}, +}; + use crate::{ - backend::impls::json::{ - JsonStorage, - meta::JsonMapMetadata, - util::{TaskKey, TaskWithMeta}, - }, - task::{ - Task, - task_id::{RandomId, TaskId}, - }, + JsonMapMetadata, JsonStorage, + util::{TaskKey, TaskWithMeta}, }; -impl Sink> - for JsonStorage +impl Deserialize<'de>> + Sink> for JsonStorage { type Error = SendError; @@ -43,7 +41,7 @@ impl Sink = this.buffer.drain(..).collect(); for task in tasks { - use crate::task::task_id::RandomId; + use apalis_core::task::task_id::RandomId; let task_id = task .parts @@ -54,7 +52,7 @@ impl Sink().to_owned(), - status: crate::task::status::Status::Pending, + status: apalis_core::task::status::Status::Pending, }; this.insert( &key, diff --git a/apalis-core/src/backend/impls/json/util.rs b/utils/apalis-file-storage/src/util.rs similarity index 88% rename from apalis-core/src/backend/impls/json/util.rs rename to utils/apalis-file-storage/src/util.rs index fc711861..ba69970f 100644 --- a/apalis-core/src/backend/impls/json/util.rs +++ b/utils/apalis-file-storage/src/util.rs @@ -4,8 +4,7 @@ use futures_util::FutureExt; use serde::{Deserialize, Serialize}; use serde_json::Value; -use crate::{ - backend::impls::json::{JsonStorage, meta::JsonMapMetadata}, +use apalis_core::{ error::BoxDynError, task::{ status::Status, @@ -14,10 +13,11 @@ use crate::{ worker::ext::ack::Acknowledge, }; -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -#[derive(Debug, Clone)] +use crate::{JsonMapMetadata, JsonStorage}; + +#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] pub struct TaskKey { - pub(super) task_id: TaskId, + pub(super) task_id: TaskId, pub(super) queue: String, pub(super) status: Status, } @@ -75,7 +75,7 @@ impl Acknowledge, - ctx: &crate::task::Parts, + ctx: &apalis_core::task::Parts, ) -> Self::Future { let store = self.inner.clone(); let val = serde_json::to_value(res.as_ref().map_err(|e| e.to_string())).unwrap(); @@ -97,15 +97,14 @@ impl Acknowledge - crate::backend::WaitForCompletion for JsonStorage + apalis_core::backend::WaitForCompletion for JsonStorage where Args: Send + serde::de::DeserializeOwned + 'static + Unpin + Serialize, { type ResultStream = futures_core::stream::BoxStream< 'static, - Result, futures_channel::mpsc::SendError>, + Result, futures_channel::mpsc::SendError>, >; fn wait_for( &self, @@ -117,7 +116,7 @@ where let task_ids: HashSet<_> = task_ids.into_iter().collect(); struct PollState { vault: JsonStorage, - pending_tasks: HashSet, + pending_tasks: HashSet>, queue: String, poll_interval: Duration, _phantom: std::marker::PhantomData, @@ -156,7 +155,7 @@ where state.pending_tasks.remove(&task_id); let result: Result = serde_json::from_value(result).unwrap(); return Some(( - Ok(crate::backend::TaskResult { + Ok(apalis_core::backend::TaskResult { task_id, status: Status::Done, result, @@ -166,7 +165,7 @@ where } // No completed 
tasks, wait and try again - crate::timer::sleep(state.poll_interval).await; + apalis_core::timer::sleep(state.poll_interval).await; } } }) @@ -176,8 +175,8 @@ where async fn check_status( &self, task_ids: impl IntoIterator> + Send, - ) -> Result>, Self::Error> { - use crate::task::status::Status; + ) -> Result>, Self::Error> { + use apalis_core::task::status::Status; use std::collections::HashSet; let task_ids: HashSet<_> = task_ids.into_iter().collect(); let mut results = Vec::new(); @@ -190,12 +189,12 @@ where if let Some(value) = self.get(&key) { let result = match serde_json::from_value::>(value.result.unwrap()) { - Ok(result) => crate::backend::TaskResult { + Ok(result) => apalis_core::backend::TaskResult { task_id: task_id.clone(), status: Status::Done, result, }, - Err(e) => crate::backend::TaskResult { + Err(e) => apalis_core::backend::TaskResult { task_id: task_id.clone(), status: Status::Failed, result: Err(format!("Deserialization error: {e}")), From 2f6caf13e1f916f8127feb20b1c65413c257a8ea Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Fri, 19 Dec 2025 13:43:03 +0300 Subject: [PATCH 02/12] chore: implement fan-in and fan-out in dag execution --- apalis-core/src/task/metadata.rs | 2 +- apalis-workflow/src/and_then/mod.rs | 14 +- apalis-workflow/src/dag/mod.rs | 412 ++++++++++++++++++--- apalis-workflow/src/dag/service.rs | 502 ++++++++++++++++++++++++++ apalis-workflow/src/delay/mod.rs | 22 +- apalis-workflow/src/filter_map/mod.rs | 17 +- apalis-workflow/src/fold/mod.rs | 17 +- apalis-workflow/src/lib.rs | 5 +- apalis-workflow/src/service.rs | 2 +- 9 files changed, 923 insertions(+), 70 deletions(-) create mode 100644 apalis-workflow/src/dag/service.rs diff --git a/apalis-core/src/task/metadata.rs b/apalis-core/src/task/metadata.rs index 333729a1..8c1d597c 100644 --- a/apalis-core/src/task/metadata.rs +++ b/apalis-core/src/task/metadata.rs @@ -15,7 +15,7 @@ use crate::task_fn::FromRequest; /// Metadata wrapper for task contexts. #[derive(Debug, Clone)] -pub struct Meta(T); +pub struct Meta(pub T); /// Task metadata extension trait and implementations. /// This trait allows for injecting and extracting metadata associated with tasks. pub trait MetadataExt { diff --git a/apalis-workflow/src/and_then/mod.rs b/apalis-workflow/src/and_then/mod.rs index a2a6fc72..132d7d3e 100644 --- a/apalis-workflow/src/and_then/mod.rs +++ b/apalis-workflow/src/and_then/mod.rs @@ -65,9 +65,9 @@ where + Clone + Sink, Error = SinkError> + Unpin, - F: Service, Error = BoxDynError> + Send + 'static + Clone, + F: Service, Error = BoxDynError> + Send + Sync + 'static + Clone, S: Step, - Input: Send + 'static, + Input: Send + Sync + 'static, F::Future: Send + 'static, F::Error: Into + Send + 'static, B::Codec: Codec @@ -106,6 +106,16 @@ pub struct AndThenService { _marker: PhantomData<(Backend, Cur)>, } +impl Clone for AndThenService { + fn clone(&self) -> Self { + Self { + service: self.service.clone(), + _marker: PhantomData, + } + } +} + + impl AndThenService { /// Creates a new `AndThenService` with the provided service. 
pub fn new(service: Svc) -> Self { diff --git a/apalis-workflow/src/dag/mod.rs b/apalis-workflow/src/dag/mod.rs index fce0aaca..39647d23 100644 --- a/apalis-workflow/src/dag/mod.rs +++ b/apalis-workflow/src/dag/mod.rs @@ -1,62 +1,102 @@ -use std::{collections::HashMap, marker::PhantomData, sync::Mutex}; +use std::{ + collections::{HashMap, VecDeque}, + fmt::Debug, + marker::PhantomData, + sync::Mutex, +}; use apalis_core::{ + backend::{ + Backend, BackendExt, WaitForCompletion, + codec::{Codec, RawDataBackend}, + }, error::BoxDynError, - task::Task, + task::{Task, metadata::MetadataExt}, task_fn::{TaskFn, task_fn}, + worker::builder::{IntoWorkerService, WorkerService}, }; +use futures::Sink; use petgraph::{ Direction, algo::toposort, dot::Config, graph::{DiGraph, EdgeIndex, NodeIndex}, }; -use tower::{Service, ServiceBuilder}; +mod service; +use tower::{Service, ServiceBuilder, util::BoxCloneSyncService}; + +use crate::{ + BoxedService, DagService, dag::service::DagExecutionResponse, id_generator::GenerateId, +}; -use crate::{BoxedService, SteppedService}; +pub use service::DagflowContext; +pub use service::RootDagService; /// Directed Acyclic Graph (DAG) workflow builder #[derive(Debug)] -pub struct DagFlow { - graph: Mutex, ()>>, +pub struct DagFlow +where + B: BackendExt, +{ + graph: Mutex, ()>>, node_mapping: Mutex>, - _marker: PhantomData<(Input, Output)>, } -impl Default for DagFlow { +impl Default for DagFlow +where + B: BackendExt, +{ fn default() -> Self { Self::new() } } -impl DagFlow { +impl DagFlow +where + B: BackendExt, +{ /// Create a new DAG workflow builder #[must_use] pub fn new() -> Self { Self { graph: Mutex::new(DiGraph::new()), node_mapping: Mutex::new(HashMap::new()), - _marker: PhantomData, } } /// Add a node to the DAG #[must_use] #[allow(clippy::todo)] - pub fn add_node(&self, name: &str, service: S) -> NodeBuilder<'_, Input, S::Response> + pub fn add_node( + &self, + name: &str, + service: S, + ) -> NodeBuilder<'_, Input, S::Response, B> where - S: Service> + Send + 'static, + S: Service> + Send + 'static + Sync + Clone, S::Future: Send + 'static, + B::Codec: + Codec + Codec + 'static, + >::Error: Debug, + >::Error: Debug, + S::Error: Into, + B::Compact: Debug, { let svc = ServiceBuilder::new() - .map_request(|r: Task<(), (), ()>| todo!()) - .map_response(|r: S::Response| todo!()) - .map_err(|_e: S::Error| { - let boxed: BoxDynError = todo!(); + .map_request(|r: Task| { + r.map(|r| B::Codec::decode(&r).unwrap()) + }) + .map_response(|r: S::Response| B::Codec::encode(&r).unwrap()) + .map_err(|e: S::Error| { + let boxed: BoxDynError = e.into(); boxed }) .service(service); - let node = self.graph.lock().unwrap().add_node(BoxedService::new(svc)); + let node = self + .graph + .lock() + .unwrap() + .add_node(BoxCloneSyncService::new(svc)); self.node_mapping .lock() .unwrap() @@ -64,40 +104,63 @@ impl DagFlow { NodeBuilder { id: node, dag: self, - io: PhantomData, + _phantom: PhantomData, } } /// Add a task function node to the DAG - pub fn node(&self, node: F) -> NodeBuilder<'_, Input, O> + pub fn node(&self, node: F) -> NodeBuilder<'_, Input, O, B> where - TaskFn: Service, Response = O>, - F: Send + 'static, - Input: Send + 'static, - FnArgs: Send + 'static, - as Service>>::Future: Send + 'static, + TaskFn: Service, Response = O, Error = Err> + Clone, + F: Send + 'static + Sync, + Input: Send + 'static + Sync, + FnArgs: Send + 'static + Sync, + B::Context: Send + Sync + 'static, + as Service>>::Future: + Send + 'static, + B::Codec: Codec + 'static, + >::Error: Debug, 
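For orientation, the `map_request`/`map_response` wrapping that `add_node` performs above has the same shape as this self-contained tower sketch, with `serde_json` standing in for `B::Codec` (an assumption for illustration only):

use serde_json::{Value, json};
use tower::{Service, ServiceBuilder, ServiceExt, service_fn};

async fn demo() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // A typed node service: usize -> String.
    let typed =
        service_fn(|n: usize| async move { Ok::<_, std::convert::Infallible>(n.to_string()) });

    // The adapter decodes the compact form on the way in and encodes on the way
    // out, so every node stored in the graph speaks `Value` regardless of its
    // real input and output types.
    let mut erased = ServiceBuilder::new()
        .map_request(|v: Value| serde_json::from_value::<usize>(v).unwrap())
        .map_response(|s: String| serde_json::to_value(&s).unwrap())
        .service(typed);

    let out = erased.ready().await?.call(json!(41)).await?;
    assert_eq!(out, json!("41"));
    Ok(())
}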
+ B::Codec: Codec + 'static, + B::Codec: Codec + 'static, + >::Error: Debug, + >::Error: Debug, + Err: Into, + B::Compact: Debug + { self.add_node(std::any::type_name::(), task_fn(node)) } /// Add a routing node to the DAG - pub fn route(&self, router: F) -> NodeBuilder<'_, Input, O> + pub fn route( + &self, + router: F, + ) -> NodeBuilder<'_, Input, O, B> where - TaskFn: Service, Response = O>, - F: Send + 'static, - Input: Send + 'static, - FnArgs: Send + 'static, - as Service>>::Future: Send + 'static, + TaskFn: Service, Response = O, Error = Err> + Clone, + F: Send + 'static + Sync, + Input: Send + 'static + Sync, + FnArgs: Send + 'static + Sync, + as Service>>::Future: + Send + 'static, O: Into, + B::Context: Send + Sync + 'static, + B::Codec: Codec + 'static, + B::Codec: Codec + 'static, + >::Error: Debug, + >::Error: Debug, + Err: Into, + B::Compact: Debug + { - self.add_node::, Input>( + self.add_node::, Input>( std::any::type_name::(), task_fn(router), ) } /// Build the DAG executor - pub fn build(self) -> Result { + pub fn build(self) -> Result, String> { // Validate DAG (check for cycles) let sorted = toposort(&*self.graph.lock().unwrap(), None).map_err(|_| "DAG contains cycles")?; @@ -117,23 +180,51 @@ impl DagFlow { graph, node_mapping: self.node_mapping.into_inner().unwrap(), topological_order: sorted, + not_ready: VecDeque::new(), }) } } /// Executor for DAG workflows + #[derive(Debug)] -pub struct DagExecutor { - graph: DiGraph, ()>, +pub struct DagExecutor +where + B: BackendExt, +{ + graph: DiGraph, ()>, node_mapping: HashMap, topological_order: Vec, start_nodes: Vec, end_nodes: Vec, + not_ready: VecDeque, +} + +impl Clone for DagExecutor +where + B: BackendExt, +{ + fn clone(&self) -> Self { + Self { + graph: self.graph.clone(), + node_mapping: self.node_mapping.clone(), + topological_order: self.topological_order.clone(), + start_nodes: self.start_nodes.clone(), + end_nodes: self.end_nodes.clone(), + not_ready: self.not_ready.clone(), + } + } } -impl DagExecutor { +impl DagExecutor +where + B: BackendExt, +{ /// Get a node by name - pub fn get_node_by_name_mut(&mut self, name: &str) -> Option<&mut SteppedService<(), (), ()>> { + pub fn get_node_by_name_mut( + &mut self, + name: &str, + ) -> Option<&mut DagService> { self.node_mapping .get(name) .and_then(|&idx| self.graph.node_weight_mut(idx)) @@ -163,15 +254,56 @@ impl DagExecutor { } } +impl IntoWorkerService, B::Compact, B::Context> + for DagExecutor +where + B: BackendExt + + Send + + Sync + + 'static + + Sink, Error = Err> + + Unpin + + Clone + + WaitForCompletion, + Err: std::error::Error + Send + Sync + 'static, + B::Context: MetadataExt> + Send + Sync + 'static, + B::IdType: Send + Sync + 'static + Default + GenerateId + PartialEq, + B: Sync + Backend, + B::Compact: Send + Sync + 'static + Clone, // Remove on compact + // B::Context: Clone, + >>::Error: Into, + B::Codec: Codec, Compact = Compact, Error = CdcErr> + 'static, + CdcErr: Into, + ::Codec: Codec< + DagExecutionResponse::Context, ::IdType>, + Compact = Compact, + Error = CdcErr, + >, +{ + type Backend = RawDataBackend; + fn into_service(self, b: B) -> WorkerService, RootDagService> { + WorkerService { + backend: RawDataBackend::new(b.clone()), + service: RootDagService::new(self, b), + } + } +} + /// Builder for a node in the DAG -#[derive(Clone, Debug)] -pub struct NodeBuilder<'a, Input, Output = ()> { +#[derive(Clone)] +pub struct NodeBuilder<'a, Input, Output, B> +where + B: BackendExt, +{ pub(crate) id: NodeIndex, - pub(crate) dag: &'a DagFlow, - 
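The cycle check and edge-node discovery in `build` come straight from petgraph. A minimal standalone illustration — the `externals` call reflects what `find_edge_nodes` appears to collect, which is an inference here:

use petgraph::{Direction, algo::toposort, graph::DiGraph};

fn main() {
    let mut g = DiGraph::<&str, ()>::new();
    let a = g.add_node("a");
    let b = g.add_node("b");
    g.add_edge(a, b, ());

    // Acyclic: toposort yields a valid execution order for the DAG.
    assert_eq!(toposort(&g, None).unwrap(), vec![a, b]);

    // Nodes with no incoming edges are the DAG's start nodes (here: `a`).
    let starts: Vec<_> = g.externals(Direction::Incoming).collect();
    assert_eq!(starts, vec![a]);

    // Closing a cycle makes `toposort` fail, which `build` surfaces as an error.
    g.add_edge(b, a, ());
    assert!(toposort(&g, None).is_err());
}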
pub(crate) io: PhantomData<(Input, Output)>, + pub(crate) dag: &'a DagFlow, + _phantom: PhantomData<(Input, Output)>, } -impl NodeBuilder<'_, Input, Output> { +impl NodeBuilder<'_, Input, Output, B> +where + B: BackendExt, +{ /// Specify dependencies for this node #[allow(clippy::needless_pass_by_value)] pub fn depends_on(self, deps: D) -> NodeHandle @@ -192,7 +324,7 @@ impl NodeBuilder<'_, Input, Output> { /// Handle for a node in the DAG #[derive(Clone, Debug)] -pub struct NodeHandle { +pub struct NodeHandle { pub(crate) id: NodeIndex, pub(crate) edges: Vec, pub(crate) _phantom: PhantomData<(Input, Output)>, @@ -210,7 +342,10 @@ impl DepsCheck<()> for () { } } -impl<'a, Input, Output> DepsCheck for &NodeBuilder<'a, Input, Output> { +impl<'a, Input, Output, B> DepsCheck for &NodeBuilder<'a, Input, Output, B> +where + B: BackendExt, +{ fn to_node_ids(&self) -> Vec { vec![self.id] } @@ -228,7 +363,10 @@ impl DepsCheck for (&NodeHandle,) { } } -impl<'a, Input, Output> DepsCheck for (&NodeBuilder<'a, Input, Output>,) { +impl<'a, Input, Output, B> DepsCheck for (&NodeBuilder<'a, Input, Output, B>,) +where + B: BackendExt, +{ fn to_node_ids(&self) -> Vec { vec![self.0.id] } @@ -243,8 +381,8 @@ impl> DepsCheck> for Vec { macro_rules! impl_deps_check { ($( $len:literal => ( $( $in:ident $out:ident $idx:tt ),+ ) ),+ $(,)?) => { $( - impl<'a, $( $in, )+ $( $out, )+> DepsCheck<( $( $out, )+ )> - for ( $( &NodeBuilder<'a, $in, $out>, )+ ) + impl<'a, $( $in, )+ $( $out, )+ B> DepsCheck<( $( $out, )+ )> + for ( $( &NodeBuilder<'a, $in, $out, B>, )+ ) where B: BackendExt { fn to_node_ids(&self) -> Vec { vec![ $( self.$idx.id ),+ ] @@ -280,24 +418,178 @@ mod tests { }; use apalis_core::{ - error::BoxDynError, task::Task, task_fn::task_fn, worker::context::WorkerContext, + error::BoxDynError, + task::{Task, builder::TaskBuilder, task_id::RandomId}, + task_fn::task_fn, + worker::{ + builder::WorkerBuilder, context::WorkerContext, event::Event, + ext::event_listener::EventListenerExt, + }, }; + use apalis_file_storage::{JsonMapMetadata, JsonStorage}; use petgraph::graph::NodeIndex; + use serde::{Deserialize, Serialize}; use serde_json::Value; - use crate::{step::Identity, workflow::Workflow}; + use crate::{WorkflowSink, step::Identity, workflow::Workflow}; use super::*; - #[test] - fn test_basic_workflow() { + #[tokio::test] + async fn test_basic_workflow() { + let dag = DagFlow::new(); + let start = dag.add_node("start", task_fn(|task: u32| async move { task as usize })); + let middle = dag + .add_node( + "middle", + task_fn(|task: usize| async move { task.to_string() }), + ) + .depends_on(&start); + + let end = dag + .add_node( + "end", + task_fn(|task: String, worker: WorkerContext| async move { + worker.stop().unwrap(); + task.parse::() + }), + ) + .depends_on(&middle); + let dag_executor = dag.build().unwrap(); + assert_eq!(dag_executor.topological_order.len(), 3); + + println!("DAG in DOT format:\n{}", dag_executor.to_dot()); + + let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); + + backend.push_start(Value::from(42)).await.unwrap(); + + let worker = WorkerBuilder::new("rango-tango") + .backend(backend) + .on_event(|ctx, ev| { + println!("On Event = {ev:?}"); + if matches!(ev, Event::Error(_)) { + ctx.stop().unwrap(); + } + }) + .build::>, RootDagService>>( + dag_executor, + ); + worker.run().await.unwrap(); + } + + #[tokio::test] + async fn test_fan_out_workflow() { + let dag = DagFlow::new(); + let source = dag.add_node("source", task_fn(|task: u32| async move { task as usize })); + 
let plus_one = dag + .add_node("plus_one", task_fn(|task: usize| async move { task + 1 })) + .depends_on(&source); + + let multiply = dag + .add_node("multiply", task_fn(|task: usize| async move { task * 2 })) + .depends_on(&source); + let squared = dag + .add_node("squared", task_fn(|task: usize| async move { task * task })) + .depends_on(&source); + let dag_executor = dag.build().unwrap(); + assert_eq!(dag_executor.topological_order.len(), 4); + + println!("DAG in DOT format:\n{}", dag_executor.to_dot()); + + let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); + + backend.push_start(Value::from(42)).await.unwrap(); + + let worker = WorkerBuilder::new("rango-tango") + .backend(backend) + .on_event(|ctx, ev| { + println!("On Event = {ev:?}"); + if matches!(ev, Event::Error(_)) { + ctx.stop().unwrap(); + } + }) + .build::>, RootDagService>>( + dag_executor, + ); + worker.run().await.unwrap(); + } + + #[tokio::test] + async fn test_fan_in_workflow() { + let dag = DagFlow::new(); + let get_name = dag.add_node( + "get_name", + task_fn(|task: u32| async move { task as usize }), + ); + let get_age = dag.add_node( + "get_age", + task_fn(|task: u32| async move { task.to_string() }), + ); + let get_address = dag.add_node( + "get_address", + task_fn(|task: u32| async move { task as usize }), + ); + let main_collector = dag + .add_node( + "main_collector", + task_fn(|task: (String, usize, usize)| async move { + task.2 + task.1 + task.0.parse::().unwrap() + }), + ) + .depends_on((&get_age, &get_name, &get_address)); + + let side_collector = dag + .add_node( + "side_collector", + task_fn(|task: Vec| async move { task.iter().sum::() }), + ) + .depends_on(vec![&get_name, &get_address]); + + let final_node = dag + .add_node( + "final_node", + task_fn(|task: (usize, usize), w: WorkerContext| async move { + w.stop().unwrap(); + task.0 + task.1 + }), + ) + .depends_on((&main_collector, &side_collector)); + let dag_executor = dag.build().unwrap(); + assert_eq!(dag_executor.topological_order.len(), 6); + + println!("DAG in DOT format:\n{}", dag_executor.to_dot()); + + let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); + + backend + .push_start(Value::from(vec![42, 43, 44])) + .await + .unwrap(); + + let worker = WorkerBuilder::new("rango-tango") + .backend(backend) + .on_event(|ctx, ev| { + println!("On Event = {ev:?}"); + if matches!(ev, Event::Error(_)) { + ctx.stop().unwrap(); + } + }) + .build::>, RootDagService>>( + dag_executor, + ); + worker.run().await.unwrap(); + } + + #[tokio::test] + async fn test_routed_workflow() { let dag = DagFlow::new(); let entry1 = dag.add_node("entry1", task_fn(|task: u32| async move { task as usize })); let entry2 = dag.add_node("entry2", task_fn(|task: u32| async move { task as usize })); let entry3 = dag.add_node("entry3", task_fn(|task: u32| async move { task as usize })); - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] enum EntryRoute { Entry1(NodeIndex), Entry2(NodeIndex), @@ -340,6 +632,7 @@ mod tests { let on_collect = dag.node(exit).depends_on((&collector, &vec_collector)); async fn check_approval(task: u32) -> Result { + println!("Approval check for task: {}", task); match task % 3 { 0 => Ok(EntryRoute::Entry1(NodeIndex::new(0))), 1 => Ok(EntryRoute::Entry2(NodeIndex::new(1))), @@ -363,6 +656,23 @@ mod tests { println!("DAG in DOT format:\n{}", dag_executor.to_dot()); + let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); + + 
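One behavioural note the fan-in tests rely on: when a DAG has several start nodes, the entry payload is an array with exactly one element per start node, and the `InitializeFanIn` path asserts as much. A rough sketch of that contract, with plain JSON values standing in for the backend generics:

use serde_json::Value;

fn split_entry(input: Value, start_nodes: usize) -> Result<Vec<Value>, String> {
    let values = input
        .as_array()
        .cloned()
        .ok_or_else(|| "fan-in entry must be an array".to_owned())?;
    if values.len() != start_nodes {
        // Mirrors the InputCountMismatch error introduced later in the series.
        return Err(format!(
            "expected {start_nodes} inputs for fan-in, got {}",
            values.len()
        ));
    }
    Ok(values) // one payload is enqueued per start node
}

fn main() {
    assert!(split_entry(Value::from(vec![42, 43, 44]), 3).is_ok());
    assert!(split_entry(Value::from(vec![42]), 3).is_err());
}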
backend.push_start(Value::from(vec![17, 18, 19])).await.unwrap(); + + let worker = WorkerBuilder::new("rango-tango") + .backend(backend) + .on_event(|ctx, ev| { + println!("On Event = {ev:?}"); + if matches!(ev, Event::Error(_)) { + ctx.stop().unwrap(); + } + }) + .build::>, RootDagService>>( + dag_executor, + ); + worker.run().await.unwrap(); + // let inner_basic: Workflow, _> = Workflow::new("basic") // .and_then(async |input: u32| (input + 1) as usize) // .and_then(async |input: usize| input.to_string()) diff --git a/apalis-workflow/src/dag/service.rs b/apalis-workflow/src/dag/service.rs new file mode 100644 index 00000000..e6bf1d69 --- /dev/null +++ b/apalis-workflow/src/dag/service.rs @@ -0,0 +1,502 @@ +use apalis_core::backend::codec::Codec; +use apalis_core::backend::{self, BackendExt, TaskResult}; +use apalis_core::task; +use apalis_core::task::builder::TaskBuilder; +use apalis_core::task::metadata::Meta; +use apalis_core::task::status::Status; +use apalis_core::{ + backend::{Backend, WaitForCompletion}, + error::BoxDynError, + task::{Task, metadata::MetadataExt, task_id::TaskId}, +}; +use futures::future::BoxFuture; +use futures::stream::StreamExt; +use futures::{FutureExt, Sink, SinkExt}; +use petgraph::graph::{DiGraph, NodeIndex}; +use petgraph::{Direction, graph}; +use serde::{Deserialize, Serialize, de}; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::fmt::Debug; +use std::future::Future; +use std::pin::Pin; +use std::task::{Context, Poll}; +use tower::Service; + +use crate::id_generator::GenerateId; +use crate::{DagExecutor, DagService}; + +/// Metadata stored in each task for workflow processing +#[derive(Debug, Clone, Deserialize, Serialize, Default)] +pub struct DagflowContext { + /// The current node being executed in the DAG + pub current_node: NodeIndex, + + /// All nodes that have been completed in this execution + pub completed_nodes: HashSet, + + /// Map of node indices to their task IDs for result lookup + pub node_task_ids: HashMap>, + + /// Current position in the topological order + pub current_position: usize, + + /// Whether this is the initial execution + pub is_initial: bool, + + /// The original task ID that started this DAG execution + pub root_task_id: Option>, + + _phantom: std::marker::PhantomData, +} + +impl DagflowContext { + /// Create initial context for DAG execution + pub fn new(root_task_id: Option>) -> Self { + Self { + current_node: NodeIndex::new(0), + completed_nodes: HashSet::new(), + node_task_ids: HashMap::new(), + current_position: 0, + is_initial: true, + root_task_id, + _phantom: std::marker::PhantomData, + } + } + + /// Mark a node as completed and store its task ID + pub fn mark_completed(&mut self, node: NodeIndex, task_id: TaskId) { + self.completed_nodes.insert(node); + self.node_task_ids.insert(node, task_id); + } + + /// Check if all dependencies of a node are completed + pub fn are_dependencies_complete(&self, dependencies: &[NodeIndex]) -> bool { + dependencies + .iter() + .all(|dep| self.completed_nodes.contains(dep)) + } + + /// Check if the DAG execution is complete + pub fn is_complete(&self, end_nodes: &Vec) -> bool { + end_nodes + .iter() + .all(|node| self.completed_nodes.contains(node)) + } + + /// Get task IDs for dependencies of a given node + pub fn get_dependency_task_ids(&self, dependencies: &[NodeIndex]) -> Vec> { + dependencies + .iter() + .filter_map(|dep| self.node_task_ids.get(dep).cloned()) + .collect() + } +} + +/// Response from DAG execution step +#[derive(Debug, Clone, Serialize, 
Deserialize)] +pub enum DagExecutionResponse { + InitializeFanIn { + input: Compact, + context: DagflowContext, + }, + EnqueueNext { + result: Compact, + context: DagflowContext, + }, + /// Waiting for dependencies to complete + WaitingForDependencies { + node: NodeIndex, + pending_dependencies: Vec, + context: DagflowContext, + }, + + /// DAG execution is complete + Complete { + end_node_task_ids: Vec>, + context: DagflowContext, + }, + + /// No more nodes can execute (shouldn't happen and should be an error) + Stuck { + context: DagflowContext, + }, +} + +impl Service> for DagExecutor +where + B: BackendExt, + B::Context: + Send + Sync + 'static + MetadataExt> + Default, + B::IdType: Clone + Send + Sync + 'static + GenerateId, + B::Compact: Send + Sync + 'static, +{ + type Response = DagExecutionResponse; + type Error = BoxDynError; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + // must wait for *all* services to be ready. + // this will cause head-of-line blocking unless the underlying services are always ready. + if self.not_ready.is_empty() { + return Poll::Ready(Ok(())); + } else { + if self + .graph + .node_weight_mut(self.not_ready[0]) + .unwrap() + .poll_ready(cx)? + .is_pending() + { + return Poll::Pending; + } + + self.not_ready.pop_front(); + } + } + } + + fn call(&mut self, req: Task) -> Self::Future { + // Clone what we need for the async block + let mut graph = self.graph.clone(); + let node_mapping = self.node_mapping.clone(); + let topological_order = self.topological_order.clone(); + let task_id = req.parts.task_id.as_ref().unwrap().clone(); + let start_nodes = self.start_nodes.clone(); + let end_nodes = self.end_nodes.clone(); + + Box::pin(async move { + let context_result = req.extract::>>(); + let mut context = match context_result.await { + Ok(ctx) => ctx.0, + Err(_) => { + if start_nodes.len() == 1 { + DagflowContext::new(req.parts.task_id.clone()) + } else { + println!("Initializing fan-in for multiple start nodes"); + return Ok(DagExecutionResponse::InitializeFanIn { + input: req.args, + context: DagflowContext::new(req.parts.task_id.clone()), + }); + } + } + }; + + // Check if execution is complete + if context.is_complete(&end_nodes) { + let end_task_ids = end_nodes + .iter() + .filter_map(|node| context.node_task_ids.get(node).cloned()) + .collect(); + + return Ok(DagExecutionResponse::Complete { + end_node_task_ids: end_task_ids, + context, + }); + } + + // Get dependencies for this node + let dependencies: Vec = graph + .neighbors_directed(context.current_node, Direction::Incoming) + .collect(); + + // Check if dependencies are ready + if !context.are_dependencies_complete(&dependencies) { + let pending: Vec = dependencies + .iter() + .copied() + .filter(|dep| !context.completed_nodes.contains(dep)) + .collect(); + + return Ok(DagExecutionResponse::WaitingForDependencies { + node: context.current_node, + pending_dependencies: pending, + context, + }); + } + + // Get the service for this node + let service = graph + .node_weight_mut(context.current_node) + .ok_or_else(|| BoxDynError::from("Node not found in graph"))?; + + let result = service.call(req).await.unwrap(); + + // Mark this node as completed (or in-progress) + context.mark_completed(context.current_node, task_id); + + Ok(DagExecutionResponse::EnqueueNext { result, context }) + }) + } +} + +/// Service that manages the execution of a DAG workflow +pub struct RootDagService +where + B: BackendExt, +{ + executor: DagExecutor, + backend: B, +} 
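The `poll_ready` loop above drains a `not_ready` queue front-to-first-pending; as the inline comment warns, one unready node service stalls the whole DAG (head-of-line blocking). The distilled shape of that loop:

use std::collections::VecDeque;
use std::task::Poll;

fn poll_all<F>(not_ready: &mut VecDeque<usize>, mut poll_one: F) -> Poll<()>
where
    F: FnMut(usize) -> Poll<()>,
{
    while let Some(&front) = not_ready.front() {
        if poll_one(front).is_pending() {
            // Head-of-line blocking: everything behind `front` waits too.
            return Poll::Pending;
        }
        // Ready: drop it from the queue and check the next node.
        not_ready.pop_front();
    }
    Poll::Ready(())
}

fn main() {
    let mut q: VecDeque<usize> = (0..3).collect();
    let result = poll_all(&mut q, |n| {
        if n == 2 { Poll::Pending } else { Poll::Ready(()) }
    });
    assert!(result.is_pending());
    assert_eq!(q.len(), 1); // nodes 0 and 1 were drained; node 2 still pending
}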
+impl RootDagService +where + B: BackendExt, +{ + pub(crate) fn new(executor: DagExecutor, backend: B) -> Self { + Self { executor, backend } + } +} + +impl Clone for RootDagService +where + B: BackendExt + Clone, +{ + fn clone(&self) -> Self { + Self { + executor: self.executor.clone(), + backend: self.backend.clone(), + } + } +} + +impl Service> for RootDagService +where + B: BackendExt + Send + Sync + 'static + Clone + WaitForCompletion, + B::IdType: GenerateId + Send + Sync + 'static + PartialEq, + B::Compact: Send + Sync + 'static + Clone, + B::Context: + Send + Sync + Default + MetadataExt> + 'static, + Err: std::error::Error + Send + Sync + 'static, + B: Sink, Error = Err> + Unpin, + B::Codec: Codec, Compact = B::Compact, Error = CdcErr> + + 'static + + Codec< + DagExecutionResponse, + Compact = B::Compact, + Error = CdcErr, + >, + CdcErr: Into, +{ + type Response = DagExecutionResponse; + type Error = BoxDynError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.executor.poll_ready(cx).map_err(|e| e.into()) + } + + fn call(&mut self, req: Task) -> Self::Future { + let mut executor = self.executor.clone(); + let backend = self.backend.clone(); + + async move { + let response = executor.call(req).await?; + match response { + DagExecutionResponse::EnqueueNext { + ref result, + ref context, + } => { + // Enqueue next tasks for downstream nodes + let mut graph = executor.graph.clone(); + let mut enqueue_futures = vec![]; + + for neighbor in executor + .graph + .neighbors_directed(context.current_node, Direction::Outgoing) + { + let service = graph + .node_weight_mut(neighbor) + .ok_or_else(|| BoxDynError::from("Node not found in graph"))?; + + let dependencies: Vec = graph + .neighbors_directed(neighbor, Direction::Incoming) + .collect(); + + let dependency_task_ids = context.get_dependency_task_ids(&dependencies); + + let task = TaskBuilder::new(result.clone()) + .with_task_id(TaskId::new(B::IdType::generate())) + .meta(DagflowContext { + current_node: neighbor, + completed_nodes: context.completed_nodes.clone(), + node_task_ids: dependency_task_ids + .iter() + .enumerate() + .map(|(i, task_id)| (dependencies[i], task_id.clone())) + .collect(), + current_position: context.current_position + 1, + is_initial: false, + root_task_id: context.root_task_id.clone(), + _phantom: std::marker::PhantomData, + }) + .build(); + + let mut b = backend.clone(); + + enqueue_futures.push(async move { + b.send(task).await.map_err(|e| BoxDynError::from(e))?; + Ok::<(), BoxDynError>(()) + }); + } + + // Await all enqueue operations + futures::future::try_join_all(enqueue_futures).await?; + } + DagExecutionResponse::InitializeFanIn { + ref input, + ref context, + } => { + use apalis_core::backend::codec::Codec; + let values: Vec = + B::Codec::decode(input).map_err(|e: CdcErr| e.into())?; + let start_nodes = executor.start_nodes.clone(); + assert_eq!(values.len(), start_nodes.len()); + + let mut collector_tasks = vec![]; + + let mut enqueue_futures = vec![]; + for (node_input, start_node) in values.into_iter().zip(start_nodes) { + let task_id = TaskId::new(B::IdType::generate()); + let task = TaskBuilder::new(node_input) + .with_task_id(task_id.clone()) + .meta(DagflowContext { + current_node: start_node, + completed_nodes: Default::default(), + node_task_ids: Default::default(), + current_position: context.current_position, + is_initial: true, + root_task_id: context.root_task_id.clone(), + _phantom: 
std::marker::PhantomData, + }) + .build(); + let mut b = backend.clone(); + collector_tasks.push((start_node, task_id)); + enqueue_futures.push( + async move { + b.send(task).await.map_err(|e| BoxDynError::from(e))?; + Ok::<(), BoxDynError>(()) + } + .boxed(), + ); + } + let collector_future = { + let mut b = backend.clone(); + let graph = executor.graph.clone(); + let collector_tasks = collector_tasks.clone(); + + async move { + let res: Vec> = b + .wait_for( + collector_tasks.iter().map(|(_, task_id)| task_id.clone()), + ) + .collect::>() + .await + .into_iter() + .collect::, _>>()?; + let outgoing_nodes = + graph.neighbors_directed(context.current_node, Direction::Outgoing); + for outgoing_node in outgoing_nodes { + let incoming_nodes = graph + .neighbors_directed(outgoing_node, Direction::Incoming) + .collect::>(); + + let all_good = res.iter().all(|r| matches!(r.status, Status::Done)); + + if !all_good { + return Err(BoxDynError::from( + "One or more collector tasks failed", + )); + } + let sorted_results = { + // Match the order of incoming_nodes by matching NodeIndex + incoming_nodes + .iter() + .rev() + .map(|node_index| { + let task_id = collector_tasks + .iter() + .find(|(n, _)| n == node_index) + .map(|(_, task_id)| task_id) + .expect("TaskId for incoming node not found"); + res.iter().find(|r| &r.task_id == task_id).expect( + "TaskResult for incoming node's task_id not found", + ) + }) + .collect::>() + }; + + let args = sorted_results + .into_iter() + .map(|s| { + let inner = s.result.as_ref().unwrap(); + let decoded: DagExecutionResponse< + B::Compact, + B::Context, + B::IdType, + > = B::Codec::decode(inner) + .map_err(|e: CdcErr| e.into())?; + match decoded { + DagExecutionResponse::EnqueueNext { + result, + context, + } => Ok(result), + _ => Err(BoxDynError::from( + "Unexpected response type from collector task", + )), + } + }) + .collect::, BoxDynError>>()?; + let mut completed_nodes = collector_tasks + .iter() + .map(|(node, _)| *node) + .collect::>(); + completed_nodes.insert(context.current_node); + + let task = TaskBuilder::new( + B::Codec::encode(&args).map_err(|e| e.into())?, + ) + .with_task_id(TaskId::new(B::IdType::generate())) + .meta(DagflowContext { + current_node: outgoing_node, + completed_nodes, + node_task_ids: collector_tasks.clone().into_iter().collect(), + current_position: context.current_position + 1, + is_initial: false, + root_task_id: context.root_task_id.clone(), + _phantom: std::marker::PhantomData, + }) + .build(); + b.send(task).await.map_err(|e| BoxDynError::from(e))?; + } + + println!("Collector results enqueued for next nodes"); + + Ok::<(), BoxDynError>(()) + } + } + .boxed(); + // Await all enqueue operations + enqueue_futures.push(collector_future); + futures::future::try_join_all(enqueue_futures).await?; + } + _ => { /* No action needed for other variants */ } + } + Ok(response) + } + .boxed() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_dag_executor_service() { + // This would test the Service implementation + // You would create a DagExecutor, create a Task, and call the service + } +} diff --git a/apalis-workflow/src/delay/mod.rs b/apalis-workflow/src/delay/mod.rs index 1bafc731..d92fecf3 100644 --- a/apalis-workflow/src/delay/mod.rs +++ b/apalis-workflow/src/delay/mod.rs @@ -55,12 +55,12 @@ where + Clone + 'static, Err: std::error::Error + Send + Sync + 'static, - S: Clone + Send + 'static, + S: Clone + Send + Sync + 'static, S::Response: Send + 'static, B::Codec: Codec + Codec + 'static, 
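In the fan-in collector earlier in this patch, results arrive in completion order but the joining node expects them in dependency order — hence the per-incoming-node lookup by task id. The same reordering in miniature:

fn reorder<'a>(dep_order: &[u32], completed: &'a [(u32, &'a str)]) -> Vec<&'a str> {
    dep_order
        .iter()
        .map(|id| {
            completed
                .iter()
                .find(|(done_id, _)| done_id == id)
                .map(|(_, result)| *result)
                .expect("TaskResult for incoming node's task_id not found")
        })
        .collect()
}

fn main() {
    // Tasks finished out of order, but the tuple argument must arrive as (a, b).
    let completion_order = [(2, "b"), (1, "a")];
    assert_eq!(reorder(&[1, 2], &completion_order), vec!["a", "b"]);
}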
>::Error: Into, B::Context: Send + 'static + MetadataExt, - Input: Send + 'static, + Input: Send + Sync + 'static, >::Error: Into, B: BackendExt, S: Step, @@ -100,16 +100,26 @@ impl Layer for DelayWith { } /// Step that delays execution by a specified duration -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct DelayWithStep { f: F, inner: S, _marker: std::marker::PhantomData<(B, Input)>, } +impl Clone for DelayWithStep { + fn clone(&self) -> Self { + DelayWithStep { + f: self.f.clone(), + inner: self.inner.clone(), + _marker: std::marker::PhantomData, + } + } +} + impl Step for DelayWithStep where - F: FnMut(Task) -> Duration + Send + 'static + Clone, + F: FnMut(Task) -> Duration + Send + Sync + 'static + Clone, B::IdType: GenerateId + Send + 'static, B::Compact: Send + 'static, B: Sink, Error = Err> @@ -119,12 +129,12 @@ where + Clone + 'static, Err: std::error::Error + Send + Sync + 'static, - S: Clone + Send + 'static, + S: Clone + Send + Sync + 'static, S::Response: Send + 'static, B::Codec: Codec + Codec + 'static, >::Error: Into, B::Context: Send + 'static + MetadataExt, - Input: Send + 'static, + Input: Send + Sync + 'static, >::Error: Into, B: BackendExt, S: Step, diff --git a/apalis-workflow/src/filter_map/mod.rs b/apalis-workflow/src/filter_map/mod.rs index 1b52f03b..be2b4769 100644 --- a/apalis-workflow/src/filter_map/mod.rs +++ b/apalis-workflow/src/filter_map/mod.rs @@ -61,12 +61,21 @@ where } /// The filter service that handles filtering and mapping of task inputs to outputs. -#[derive(Clone, Debug)] +#[derive(Debug)] pub struct FilterService { service: F, _marker: PhantomData<(Backend, Input, Iter)>, } +impl Clone for FilterService { + fn clone(&self) -> Self { + FilterService { + service: self.service.clone(), + _marker: PhantomData, + } + } +} + /// The state of the filter operation #[derive(Debug, Clone, Deserialize, Serialize)] @@ -238,7 +247,7 @@ where impl Step for FilterMapStep where - I: IntoIterator + Send + 'static, + I: IntoIterator + Send + Sync + 'static, B: BackendExt + Send + Sync @@ -248,11 +257,11 @@ where + WaitForCompletion>> + Unpin, F: Service, Error = BoxDynError, Response = Option> - + Send + + Send + Sync + 'static + Clone, S: Step, B>, - Input: Send + 'static, + Input: Send + Sync + 'static, F::Future: Send + 'static, F::Error: Into + Send + 'static, B::Codec: Codec diff --git a/apalis-workflow/src/fold/mod.rs b/apalis-workflow/src/fold/mod.rs index a9534fbb..b7a65056 100644 --- a/apalis-workflow/src/fold/mod.rs +++ b/apalis-workflow/src/fold/mod.rs @@ -70,7 +70,7 @@ impl, Init, B, MetaErr, Err, CodecErr for FoldStep where F: Service, Response = Init> - + Send + + Send + Sync + 'static + Clone, S: Step, @@ -81,7 +81,7 @@ where + Sink, Error = Err> + Unpin + 'static, - I: IntoIterator + Send + 'static, + I: IntoIterator + Send + Sync + 'static, B::Context: MetadataExt + MetadataExt + Send @@ -92,7 +92,7 @@ where + Codec<(Init, Input), Error = CodecError, Compact = B::Compact> + 'static, B::IdType: GenerateId + Send + 'static + Clone, - Init: Default + Send + 'static, + Init: Default + Send + Sync + 'static, Err: std::error::Error + Send + Sync + 'static, CodecError: std::error::Error + Send + Sync + 'static, F::Error: Into + Send + 'static, @@ -115,12 +115,21 @@ where } /// The fold service that handles folding over a collection of items. 
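A recurring edit in this patch (here on `DelayWithStep` and `FilterService`, next on `FoldService`) is replacing `derive(Clone)` with a hand-written impl: the derive would put a `Clone` bound on every type parameter, including the ones that only appear in `PhantomData`, which the backend types do not satisfy. The `BoxService` → `BoxCloneSyncService` switch in lib.rs serves the same goal of keeping erased services cloneable and `Sync`. A minimal illustration of the derive pitfall:

use std::marker::PhantomData;

struct Step<S, B> {
    service: S,
    _marker: PhantomData<B>,
}

// Only `S: Clone` is required; `B` stays unconstrained. `derive(Clone)` would
// have emitted a `B: Clone` bound even though no `B` value is ever cloned.
impl<S: Clone, B> Clone for Step<S, B> {
    fn clone(&self) -> Self {
        Step {
            service: self.service.clone(),
            _marker: PhantomData,
        }
    }
}

struct NotClone; // stand-in for a backend type without Clone

fn main() {
    let s: Step<u32, NotClone> = Step { service: 1, _marker: PhantomData };
    let _t = s.clone(); // compiles because the manual impl ignores `B`
}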
-#[derive(Clone, Debug)] +#[derive(Debug)] pub struct FoldService { fold: F, _marker: std::marker::PhantomData<(Init, I, B)>, } +impl Clone for FoldService { + fn clone(&self) -> Self { + Self { + fold: self.fold.clone(), + _marker: std::marker::PhantomData, + } + } +} + impl FoldService { /// Creates a new `FoldService` with the given fold function. pub fn new(fold: F) -> Self { diff --git a/apalis-workflow/src/lib.rs b/apalis-workflow/src/lib.rs index b73d14c8..83b777a0 100644 --- a/apalis-workflow/src/lib.rs +++ b/apalis-workflow/src/lib.rs @@ -11,10 +11,13 @@ use apalis_core::{error::BoxDynError, task::Task}; use crate::router::{GoTo, StepResult}; -type BoxedService = tower::util::BoxService; +type BoxedService = tower::util::BoxCloneSyncService; type SteppedService = BoxedService, GoTo>>; +type DagService = + BoxedService, Compact>; + /// combinator for sequential workflow execution. pub mod and_then; /// combinator for chaining multiple workflows. diff --git a/apalis-workflow/src/service.rs b/apalis-workflow/src/service.rs index a653c364..113fa51e 100644 --- a/apalis-workflow/src/service.rs +++ b/apalis-workflow/src/service.rs @@ -20,7 +20,7 @@ use crate::{ }; /// The main workflow service that orchestrates the execution of workflow steps. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct WorkflowService where B: BackendExt, From b1211b38f7d47aedef9a8a1d18433464c95269f2 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Mon, 22 Dec 2025 15:05:14 +0300 Subject: [PATCH 03/12] chore: get fanin fanout working --- apalis-workflow/src/and_then/mod.rs | 7 +- apalis-workflow/src/dag/context.rs | 74 +++ apalis-workflow/src/dag/error.rs | 67 +++ apalis-workflow/src/dag/executor.rs | 192 +++++++ apalis-workflow/src/dag/mod.rs | 279 +++++----- apalis-workflow/src/dag/node.rs | 95 ++++ apalis-workflow/src/dag/response.rs | 36 ++ apalis-workflow/src/dag/service.rs | 706 ++++++++++++-------------- apalis-workflow/src/filter_map/mod.rs | 3 +- apalis-workflow/src/fold/mod.rs | 3 +- apalis-workflow/src/lib.rs | 5 +- utils/apalis-file-storage/src/util.rs | 8 + 12 files changed, 920 insertions(+), 555 deletions(-) create mode 100644 apalis-workflow/src/dag/context.rs create mode 100644 apalis-workflow/src/dag/error.rs create mode 100644 apalis-workflow/src/dag/executor.rs create mode 100644 apalis-workflow/src/dag/node.rs create mode 100644 apalis-workflow/src/dag/response.rs diff --git a/apalis-workflow/src/and_then/mod.rs b/apalis-workflow/src/and_then/mod.rs index 132d7d3e..051bf0fb 100644 --- a/apalis-workflow/src/and_then/mod.rs +++ b/apalis-workflow/src/and_then/mod.rs @@ -65,7 +65,11 @@ where + Clone + Sink, Error = SinkError> + Unpin, - F: Service, Error = BoxDynError> + Send + Sync + 'static + Clone, + F: Service, Error = BoxDynError> + + Send + + Sync + + 'static + + Clone, S: Step, Input: Send + Sync + 'static, F::Future: Send + 'static, @@ -115,7 +119,6 @@ impl Clone for AndThenService { } } - impl AndThenService { /// Creates a new `AndThenService` with the provided service. 
pub fn new(service: Svc) -> Self { diff --git a/apalis-workflow/src/dag/context.rs b/apalis-workflow/src/dag/context.rs new file mode 100644 index 00000000..718f2b98 --- /dev/null +++ b/apalis-workflow/src/dag/context.rs @@ -0,0 +1,74 @@ +use std::collections::{HashMap, HashSet}; + +use apalis_core::task::task_id::TaskId; +use petgraph::graph::NodeIndex; +use serde::{Deserialize, Serialize}; + +/// Metadata stored in each task for workflow processing +#[derive(Debug, Deserialize, Serialize, Default)] +pub struct DagflowContext { + /// Previous node executed in the DAG + /// This is the source node that led to the current node's execution + pub prev_node: Option, + /// The current node being executed in the DAG + pub current_node: NodeIndex, + + /// All nodes that have been completed in this execution + pub completed_nodes: HashSet, + + /// Map of node indices to their task IDs for result lookup + pub node_task_ids: HashMap>, + + /// Current position in the topological order + pub current_position: usize, + + /// Whether this is the initial execution + pub is_initial: bool, + + /// The original task ID that started this DAG execution + pub root_task_id: Option>, +} + +impl Clone for DagflowContext { + fn clone(&self) -> Self { + Self { + prev_node: self.prev_node, + current_node: self.current_node, + completed_nodes: self.completed_nodes.clone(), + node_task_ids: self.node_task_ids.clone(), + current_position: self.current_position, + is_initial: self.is_initial, + root_task_id: self.root_task_id.clone(), + } + } +} + +impl DagflowContext { + /// Create initial context for DAG execution + pub fn new(root_task_id: Option>) -> Self { + Self { + prev_node: None, + current_node: NodeIndex::new(0), + completed_nodes: HashSet::new(), + node_task_ids: HashMap::new(), + current_position: 0, + is_initial: true, + root_task_id, + } + } + /// Get task IDs for dependencies of a given node + pub fn get_dependency_task_ids( + &self, + dependencies: &[NodeIndex], + ) -> HashMap> { + dependencies + .iter() + .filter_map(|dep| { + self.node_task_ids + .get(dep) + .cloned() + .map(|task_id| (*dep, task_id)) + }) + .collect() + } +} diff --git a/apalis-workflow/src/dag/error.rs b/apalis-workflow/src/dag/error.rs new file mode 100644 index 00000000..03815e11 --- /dev/null +++ b/apalis-workflow/src/dag/error.rs @@ -0,0 +1,67 @@ +use apalis_core::error::BoxDynError; +use petgraph::{algo::Cycle, graph::NodeIndex}; +use std::fmt::Debug; +use thiserror::Error; + +/// Errors that can occur during DAG workflow execution. +#[derive(Error, Debug)] +pub enum DagflowError { + /// An error originating from the actual node execution. + #[error("Node execution error: {0}")] + Node(#[source] BoxDynError), + /// An error originating from the backend. + #[error("Backend error: {0}")] + Backend(#[source] BoxDynError), + + /// An error originating from the service. + #[error("MissingService error: {0:?}")] + MissingService(petgraph::graph::NodeIndex), + + /// An error originating from the service. + #[error("Service error: {0}")] + Service(#[source] BoxDynError), + + /// An error related to codec operations. + #[error("Codec error: {0}")] + Codec(#[source] BoxDynError), + + /// An error related to metadata operations. + #[error("Metadata error: {0}")] + Metadata(#[source] BoxDynError), + + /// An error indicating that dependencies are not ready. + #[error("Dependencies not ready")] + DependenciesNotReady, + + /// An error indicating a missing task ID for a dependency node. 
+ #[error("Missing task ID for dependency node {0:?}")] + MissingDependencyTaskId(petgraph::graph::NodeIndex), + + /// An error indicating that a task result was not found for a node. + #[error("Task result not found for node {0:?}")] + TaskResultNotFound(petgraph::graph::NodeIndex), + + /// An error indicating that a dependency task has failed. + #[error("Dependency task failed: {0}")] + DependencyTaskFailed(String), + + /// An error indicating an unexpected response type during fan-in. + #[error("Unexpected response type during fan-in")] + UnexpectedResponseType, + + /// An error indicating a mismatch in the number of inputs during fan-in. + #[error("Input count mismatch: expected {expected} inputs for fan-in, got {actual}")] + InputCountMismatch { + /// The expected number of inputs. + expected: usize, + /// The actual number of inputs received. + actual: usize, + }, + /// An error indicating that entry fan-out is not completed. + #[error("Entry fan-out not completed")] + EntryFanOutIncomplete, + + /// DAG contains cycles. + #[error("DAG contains cycles involving nodes: {0:?}")] + CyclicDAG(Cycle), +} diff --git a/apalis-workflow/src/dag/executor.rs b/apalis-workflow/src/dag/executor.rs new file mode 100644 index 00000000..4419b635 --- /dev/null +++ b/apalis-workflow/src/dag/executor.rs @@ -0,0 +1,192 @@ +use std::{ + collections::{HashMap, VecDeque}, + fmt::Debug, + pin::Pin, + task::{Context, Poll}, +}; + +use apalis_core::{ + backend::{ + Backend, BackendExt, WaitForCompletion, + codec::{Codec, RawDataBackend}, + }, + error::BoxDynError, + task::{ + Task, + metadata::{Meta, MetadataExt}, + }, + worker::builder::{IntoWorkerService, WorkerService}, +}; +use futures::Sink; +use petgraph::{ + dot::Config, + graph::{DiGraph, NodeIndex}, +}; +use tower::Service; + +use crate::{ + DagService, + dag::{DagflowContext, RootDagService, error::DagflowError, response::DagExecutionResponse}, + id_generator::GenerateId, +}; + +/// Executor for DAG workflows +#[derive(Debug)] +pub struct DagExecutor +where + B: BackendExt, +{ + pub(super) graph: DiGraph, ()>, + pub(super) node_mapping: HashMap, + pub(super) topological_order: Vec, + pub(super) start_nodes: Vec, + pub(super) end_nodes: Vec, + pub(super) not_ready: VecDeque, +} + +impl Clone for DagExecutor +where + B: BackendExt, +{ + fn clone(&self) -> Self { + Self { + graph: self.graph.clone(), + node_mapping: self.node_mapping.clone(), + topological_order: self.topological_order.clone(), + start_nodes: self.start_nodes.clone(), + end_nodes: self.end_nodes.clone(), + not_ready: self.not_ready.clone(), + } + } +} + +impl DagExecutor +where + B: BackendExt, +{ + /// Get a node by name + pub fn get_node_by_name_mut( + &mut self, + name: &str, + ) -> Option<&mut DagService> { + self.node_mapping + .get(name) + .and_then(|&idx| self.graph.node_weight_mut(idx)) + } + + /// Export the DAG to DOT format + #[must_use] + pub fn to_dot(&self) -> String { + let names = self + .node_mapping + .iter() + .map(|(name, &idx)| (idx, name.clone())) + .collect::>(); + let get_node_attributes = |_, (index, _)| { + format!( + "label=\"{}\"", + names.get(&index).cloned().unwrap_or_default() + ) + }; + let dot = petgraph::dot::Dot::with_attr_getters( + &self.graph, + &[Config::NodeNoLabel, Config::EdgeNoLabel], + &|_, _| String::new(), + &get_node_attributes, + ); + format!("{dot:?}") + } +} + +impl Service> for DagExecutor +where + B: BackendExt, + B::Context: + Send + Sync + 'static + MetadataExt, Error = MetaError> + Default, + B::IdType: Clone + Send + Sync + 'static 
+ GenerateId + Debug, + B::Compact: Send + Sync + 'static, + MetaError: Into, +{ + type Response = B::Compact; + type Error = DagflowError; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + loop { + // must wait for *all* services to be ready. + // this will cause head-of-line blocking unless the underlying services are always ready. + if self.not_ready.is_empty() { + return Poll::Ready(Ok(())); + } else { + if self + .graph + .node_weight_mut(self.not_ready[0]) + .unwrap() + .poll_ready(cx) + .map_err(|e| DagflowError::Service(e))? + .is_pending() + { + return Poll::Pending; + } + + self.not_ready.pop_front(); + } + } + } + + fn call(&mut self, req: Task) -> Self::Future { + let mut graph = self.graph.clone(); + + Box::pin(async move { + let context = req + .extract::>>() + .await + .map_err(|e| DagflowError::Metadata(e.into()))? + .0; + + // Get the service for this node + let service = graph + .node_weight_mut(context.current_node) + .ok_or_else(|| DagflowError::MissingService(context.current_node))?; + + let result = service.call(req).await.map_err(|e| DagflowError::Node(e))?; + + Ok(result) + }) + } +} + +impl + IntoWorkerService, B::Compact, B::Context> for DagExecutor +where + B: BackendExt + + Send + + Sync + + 'static + + Sink, Error = Err> + + Unpin + + Clone + + WaitForCompletion, + Err: std::error::Error + Send + Sync + 'static, + B::Context: MetadataExt, Error = MetaError> + Send + Sync + 'static, + B::IdType: Send + Sync + 'static + Default + GenerateId + PartialEq + Debug, + B: Sync + Backend, + B::Compact: Send + Sync + 'static + Clone, + >>::Error: Into, + B::Codec: Codec, Compact = Compact, Error = CdcErr> + 'static, + CdcErr: Into, + ::Codec: Codec< + DagExecutionResponse::IdType>, + Compact = Compact, + Error = CdcErr, + >, + MetaError: Send + Sync + 'static + Into, +{ + type Backend = RawDataBackend; + fn into_service(self, b: B) -> WorkerService, RootDagService> { + WorkerService { + backend: RawDataBackend::new(b.clone()), + service: RootDagService::new(self, b), + } + } +} diff --git a/apalis-workflow/src/dag/mod.rs b/apalis-workflow/src/dag/mod.rs index 39647d23..eb31e974 100644 --- a/apalis-workflow/src/dag/mod.rs +++ b/apalis-workflow/src/dag/mod.rs @@ -20,16 +20,34 @@ use petgraph::{ Direction, algo::toposort, dot::Config, - graph::{DiGraph, EdgeIndex, NodeIndex}, + graph::{DiGraph, EdgeIndex, Node, NodeIndex}, }; -mod service; +/// DAG executor implementations +pub mod executor; +/// DAG service implementations +pub mod service; + +/// DAG error definitions +pub mod error; +/// DAG node implementations +pub mod node; + +/// DAG context implementations +pub mod context; + +/// DAG response implementations +pub mod response; + +use serde::{Deserialize, Serialize}; use tower::{Service, ServiceBuilder, util::BoxCloneSyncService}; use crate::{ - BoxedService, DagService, dag::service::DagExecutionResponse, id_generator::GenerateId, + BoxedService, DagService, + dag::{error::DagflowError, executor::DagExecutor, node::NodeService}, + id_generator::GenerateId, }; -pub use service::DagflowContext; +pub use context::DagflowContext; pub use service::RootDagService; /// Directed Acyclic Graph (DAG) workflow builder @@ -67,7 +85,7 @@ where /// Add a node to the DAG #[must_use] #[allow(clippy::todo)] - pub fn add_node( + pub fn add_node( &self, name: &str, service: S, @@ -75,31 +93,23 @@ where where S: Service> + Send + 'static + Sync + Clone, S::Future: Send + 'static, - B::Codec: - Codec + Codec + 'static, - >::Error: Debug, - 
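Two things above are worth anchoring: `DagflowError` is a plain `thiserror` enum, and `DagExecutor::call` boils down to "extract the context from task metadata, look up the node's service, run it". A compressed sketch of both, where `usize` and fn pointers stand in for node indices and the boxed node services:

use std::collections::HashMap;
use thiserror::Error;

#[derive(Error, Debug)]
enum MiniDagError {
    #[error("MissingService error: {0:?}")]
    MissingService(usize),
    #[error("Node execution error: {0}")]
    Node(#[source] Box<dyn std::error::Error + Send + Sync>),
}

fn dispatch(
    services: &mut HashMap<usize, fn(u32) -> Result<u32, String>>,
    current_node: usize, // read from the task's `Meta<DagflowContext>` in the real code
    payload: u32,
) -> Result<u32, MiniDagError> {
    let svc = services
        .get_mut(&current_node)
        .ok_or(MiniDagError::MissingService(current_node))?;
    svc(payload).map_err(|e| MiniDagError::Node(e.into()))
}

fn main() {
    let mut services: HashMap<usize, fn(u32) -> Result<u32, String>> = HashMap::new();
    services.insert(0, |n| Ok(n + 1));
    assert_eq!(dispatch(&mut services, 0, 41).unwrap(), 42);
    assert!(matches!(
        dispatch(&mut services, 9, 0),
        Err(MiniDagError::MissingService(9))
    ));
}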
>::Error: Debug, + B::Codec: Codec + + Codec + + 'static, + CodecError: Into + Send + 'static, S::Error: Into, - B::Compact: Debug, + B: Send + Sync + 'static, + Input: Send + Sync + 'static, { - let svc = ServiceBuilder::new() - .map_request(|r: Task| { - r.map(|r| B::Codec::decode(&r).unwrap()) - }) - .map_response(|r: S::Response| B::Codec::encode(&r).unwrap()) - .map_err(|e: S::Error| { - let boxed: BoxDynError = e.into(); - boxed - }) - .service(service); + let svc: NodeService = NodeService::new(service); let node = self .graph .lock() - .unwrap() + .expect("Failed to lock graph mutex") .add_node(BoxCloneSyncService::new(svc)); self.node_mapping .lock() - .unwrap() + .expect("Failed to lock node_mapping mutex") .insert(name.to_owned(), node); NodeBuilder { id: node, @@ -109,7 +119,7 @@ where } /// Add a task function node to the DAG - pub fn node(&self, node: F) -> NodeBuilder<'_, Input, O, B> + pub fn node(&self, node: F) -> NodeBuilder<'_, Input, O, B> where TaskFn: Service, Response = O, Error = Err> + Clone, F: Send + 'static + Sync, @@ -118,21 +128,19 @@ where B::Context: Send + Sync + 'static, as Service>>::Future: Send + 'static, - B::Codec: Codec + 'static, - >::Error: Debug, - B::Codec: Codec + 'static, - B::Codec: Codec + 'static, - >::Error: Debug, - >::Error: Debug, + B::Codec: Codec + 'static, + B::Codec: Codec + 'static, + CodecError: Into + Send + 'static, Err: Into, - B::Compact: Debug + B: Send + Sync + 'static, + Input: Send + Sync + 'static, { self.add_node(std::any::type_name::(), task_fn(node)) } /// Add a routing node to the DAG - pub fn route( + pub fn route( &self, router: F, ) -> NodeBuilder<'_, Input, O, B> @@ -145,25 +153,28 @@ where Send + 'static, O: Into, B::Context: Send + Sync + 'static, - B::Codec: Codec + 'static, - B::Codec: Codec + 'static, - >::Error: Debug, - >::Error: Debug, + B::Codec: Codec + 'static, + B::Codec: Codec + 'static, + CodecError: Into + Send + 'static, Err: Into, - B::Compact: Debug + B: Send + Sync + 'static, + Input: Send + Sync + 'static, { - self.add_node::, Input>( + self.add_node::, Input, CodecError>( std::any::type_name::(), task_fn(router), ) } /// Build the DAG executor - pub fn build(self) -> Result, String> { + pub fn build(self) -> Result, DagflowError> { // Validate DAG (check for cycles) - let sorted = - toposort(&*self.graph.lock().unwrap(), None).map_err(|_| "DAG contains cycles")?; + let sorted = toposort( + &*self.graph.lock().expect("Failed to lock graph mutex"), + None, + ) + .map_err(DagflowError::CyclicDAG)?; fn find_edge_nodes(graph: &DiGraph, direction: Direction) -> Vec { graph @@ -172,134 +183,59 @@ where .collect() } - let graph = self.graph.into_inner().unwrap(); + let graph = self + .graph + .into_inner() + .expect("Failed to unlock graph mutex"); Ok(DagExecutor { start_nodes: find_edge_nodes(&graph, Direction::Incoming), end_nodes: find_edge_nodes(&graph, Direction::Outgoing), graph, - node_mapping: self.node_mapping.into_inner().unwrap(), + node_mapping: self + .node_mapping + .into_inner() + .expect("Failed to unlock node_mapping mutex"), topological_order: sorted, not_ready: VecDeque::new(), }) } } -/// Executor for DAG workflows - -#[derive(Debug)] -pub struct DagExecutor +/// Builder for a node in the DAG +pub struct NodeBuilder<'a, Input, Output, B> where B: BackendExt, { - graph: DiGraph, ()>, - node_mapping: HashMap, - topological_order: Vec, - start_nodes: Vec, - end_nodes: Vec, - not_ready: VecDeque, + pub(crate) id: NodeIndex, + pub(crate) dag: &'a DagFlow, + _phantom: PhantomData<(Input, 
Output)>, } -impl Clone for DagExecutor +impl<'a, Input, Output, B> std::fmt::Debug for NodeBuilder<'a, Input, Output, B> where B: BackendExt, { - fn clone(&self) -> Self { - Self { - graph: self.graph.clone(), - node_mapping: self.node_mapping.clone(), - topological_order: self.topological_order.clone(), - start_nodes: self.start_nodes.clone(), - end_nodes: self.end_nodes.clone(), - not_ready: self.not_ready.clone(), - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NodeBuilder") + .field("id", &self.id) + .finish_non_exhaustive() } } -impl DagExecutor +impl<'a, Input, Output, B> Clone for NodeBuilder<'a, Input, Output, B> where B: BackendExt, { - /// Get a node by name - pub fn get_node_by_name_mut( - &mut self, - name: &str, - ) -> Option<&mut DagService> { - self.node_mapping - .get(name) - .and_then(|&idx| self.graph.node_weight_mut(idx)) - } - - /// Export the DAG to DOT format - #[must_use] - pub fn to_dot(&self) -> String { - let names = self - .node_mapping - .iter() - .map(|(name, &idx)| (idx, name.clone())) - .collect::>(); - let get_node_attributes = |_, (index, _)| { - format!( - "label=\"{}\"", - names.get(&index).cloned().unwrap_or_default() - ) - }; - let dot = petgraph::dot::Dot::with_attr_getters( - &self.graph, - &[Config::NodeNoLabel, Config::EdgeNoLabel], - &|_, _| String::new(), - &get_node_attributes, - ); - format!("{dot:?}") - } -} - -impl IntoWorkerService, B::Compact, B::Context> - for DagExecutor -where - B: BackendExt - + Send - + Sync - + 'static - + Sink, Error = Err> - + Unpin - + Clone - + WaitForCompletion, - Err: std::error::Error + Send + Sync + 'static, - B::Context: MetadataExt> + Send + Sync + 'static, - B::IdType: Send + Sync + 'static + Default + GenerateId + PartialEq, - B: Sync + Backend, - B::Compact: Send + Sync + 'static + Clone, // Remove on compact - // B::Context: Clone, - >>::Error: Into, - B::Codec: Codec, Compact = Compact, Error = CdcErr> + 'static, - CdcErr: Into, - ::Codec: Codec< - DagExecutionResponse::Context, ::IdType>, - Compact = Compact, - Error = CdcErr, - >, -{ - type Backend = RawDataBackend; - fn into_service(self, b: B) -> WorkerService, RootDagService> { - WorkerService { - backend: RawDataBackend::new(b.clone()), - service: RootDagService::new(self, b), + fn clone(&self) -> Self { + Self { + id: self.id, + dag: self.dag, + _phantom: PhantomData, } } } -/// Builder for a node in the DAG -#[derive(Clone)] -pub struct NodeBuilder<'a, Input, Output, B> -where - B: BackendExt, -{ - pub(crate) id: NodeIndex, - pub(crate) dag: &'a DagFlow, - _phantom: PhantomData<(Input, Output)>, -} - impl NodeBuilder<'_, Input, Output, B> where B: BackendExt, @@ -311,7 +247,7 @@ where D: DepsCheck, { let mut edges = Vec::new(); - for dep in deps.to_node_ids() { + for dep in deps.to_node_indices() { edges.push(self.dag.graph.lock().unwrap().add_edge(dep, self.id, ())); } NodeHandle { @@ -332,12 +268,12 @@ pub struct NodeHandle { /// Trait for converting dependencies into node IDs pub trait DepsCheck { - /// Convert dependencies to node IDs - fn to_node_ids(&self) -> Vec; + /// Convert dependencies to node indices + fn to_node_indices(&self) -> Vec; } impl DepsCheck<()> for () { - fn to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { Vec::new() } } @@ -346,19 +282,19 @@ impl<'a, Input, Output, B> DepsCheck for &NodeBuilder<'a, Input, Output, where B: BackendExt, { - fn to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { vec![self.id] } } impl DepsCheck for &NodeHandle { - fn 
to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { vec![self.id] } } impl DepsCheck for (&NodeHandle,) { - fn to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { vec![self.0.id] } } @@ -367,14 +303,16 @@ impl<'a, Input, Output, B> DepsCheck for (&NodeBuilder<'a, Input, Output where B: BackendExt, { - fn to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { vec![self.0.id] } } impl> DepsCheck> for Vec { - fn to_node_ids(&self) -> Vec { - self.iter().flat_map(|item| item.to_node_ids()).collect() + fn to_node_indices(&self) -> Vec { + self.iter() + .flat_map(|item| item.to_node_indices()) + .collect() } } @@ -384,7 +322,7 @@ macro_rules! impl_deps_check { impl<'a, $( $in, )+ $( $out, )+ B> DepsCheck<( $( $out, )+ )> for ( $( &NodeBuilder<'a, $in, $out, B>, )+ ) where B: BackendExt { - fn to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { vec![ $( self.$idx.id ),+ ] } } @@ -392,7 +330,7 @@ macro_rules! impl_deps_check { impl<$( $in, )+ $( $out, )+> DepsCheck<( $( $out, )+ )> for ( $( &NodeHandle<$in, $out>, )+ ) { - fn to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { vec![ $( self.$idx.id ),+ ] } } @@ -411,6 +349,19 @@ impl_deps_check! { 8 => (Input1 Output1 0, Input2 Output2 1, Input3 Output3 2, Input4 Output4 3, Input5 Output5 4, Input6 Output6 5, Input7 Output7 6, Input8 Output8 7), } +/// State of the node in DAG execution +#[derive(Debug, Clone, Deserialize, Serialize)] +pub enum DagState { + /// Unknown state + Unknown, + /// State for a single node + SingleNode, + /// Fan-in state to gather inputs + FanIn, + /// Fan-out state to distribute inputs + FanOut, +} + #[cfg(test)] mod tests { use std::{ @@ -492,8 +443,19 @@ mod tests { let squared = dag .add_node("squared", task_fn(|task: usize| async move { task * task })) .depends_on(&source); + + let collector = dag + .add_node( + "collector", + task_fn(|task: (usize, usize, usize), w: WorkerContext| async move { + w.stop().unwrap(); + task.0 + task.1 + task.2 + }), + ) + .depends_on((&plus_one, &multiply, &squared)); + let dag_executor = dag.build().unwrap(); - assert_eq!(dag_executor.topological_order.len(), 4); + assert_eq!(dag_executor.topological_order.len(), 5); println!("DAG in DOT format:\n{}", dag_executor.to_dot()); @@ -607,7 +569,7 @@ mod tests { } impl DepsCheck for EntryRoute { - fn to_node_ids(&self) -> Vec { + fn to_node_indices(&self) -> Vec { vec![(*self).into()] } } @@ -631,8 +593,12 @@ mod tests { let on_collect = dag.node(exit).depends_on((&collector, &vec_collector)); - async fn check_approval(task: u32) -> Result { + async fn check_approval( + task: u32, + worker: WorkerContext, + ) -> Result { println!("Approval check for task: {}", task); + worker.stop().unwrap(); match task % 3 { 0 => Ok(EntryRoute::Entry1(NodeIndex::new(0))), 1 => Ok(EntryRoute::Entry2(NodeIndex::new(1))), @@ -658,7 +624,10 @@ mod tests { let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); - backend.push_start(Value::from(vec![17, 18, 19])).await.unwrap(); + backend + .push_start(Value::from(vec![17, 18, 19])) + .await + .unwrap(); let worker = WorkerBuilder::new("rango-tango") .backend(backend) diff --git a/apalis-workflow/src/dag/node.rs b/apalis-workflow/src/dag/node.rs new file mode 100644 index 00000000..b88e7a2d --- /dev/null +++ b/apalis-workflow/src/dag/node.rs @@ -0,0 +1,95 @@ +use apalis_core::backend::BackendExt; +use apalis_core::backend::codec::Codec; +use apalis_core::error::BoxDynError; +use apalis_core::task::Task; +use std::future::Future; +use 
std::pin::Pin; +use std::task::{Context, Poll}; +use tower::Service; + +/// A service that wraps another service to handle encoding and decoding +/// of task inputs and outputs using the backend's codec. +pub struct NodeService +where + S: Service>, + B: BackendExt, +{ + inner: S, + _phantom: std::marker::PhantomData<(B, Input)>, +} + +impl std::fmt::Debug for NodeService +where + S: Service>, + B: BackendExt, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("NodeService") + .field("inner", &"") + .field("_phantom", &std::any::type_name::<(B, Input)>()) + .finish() + } +} + +impl Clone for NodeService +where + S: Service> + Clone, + B: BackendExt, +{ + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + _phantom: std::marker::PhantomData, + } + } +} + +impl NodeService +where + S: Service>, + B: BackendExt, +{ + /// Creates a new `NodeService` wrapping the provided service. + pub fn new(inner: S) -> Self { + Self { + inner, + _phantom: std::marker::PhantomData, + } + } +} + +impl Service> + for NodeService +where + S: Service>, + S::Error: Into, + B: BackendExt, + B::Codec: Codec + + Codec, + CdcErr: Into + Send + 'static, + S::Future: Send + 'static, +{ + type Response = B::Compact; + type Error = BoxDynError; + type Future = Pin> + Send>>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx).map_err(|e| e.into()) + } + + fn call(&mut self, req: Task) -> Self::Future { + let decoded_req = match B::Codec::decode(&req.args) { + Ok(decoded) => req.map(|_| decoded), + Err(e) => { + return Box::pin(async move { Err(CdcErr::into(e)) }); + } + }; + + let fut = self.inner.call(decoded_req); + + Box::pin(async move { + let response = fut.await.map_err(|e| e.into())?; + B::Codec::encode(&response).map_err(|e| e.into()) + }) + } +} diff --git a/apalis-workflow/src/dag/response.rs b/apalis-workflow/src/dag/response.rs new file mode 100644 index 00000000..aeefab07 --- /dev/null +++ b/apalis-workflow/src/dag/response.rs @@ -0,0 +1,36 @@ +use std::collections::HashMap; + +use apalis_core::task::task_id::TaskId; +use petgraph::graph::NodeIndex; +use serde::{Deserialize, Serialize}; + +/// Response from DAG execution step +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum DagExecutionResponse { + /// Entry nodes have been fanned out + EntryFanOut { + /// Map of node indices to their task IDs + node_task_ids: HashMap>, + }, + /// Next tasks have been fanned out + FanOut { + /// Result of the current task + response: Compact, + }, + /// Next task has been enqueued + EnqueuedNext { + /// Result of the current task + result: Compact, + }, + /// Waiting for dependencies to complete + WaitingForDependencies { + /// Map of pending dependency node indices to their task IDs + pending_dependencies: HashMap>, + }, + + /// DAG execution is complete + Complete { + /// Result of the final task + result: Compact, + }, +} diff --git a/apalis-workflow/src/dag/service.rs b/apalis-workflow/src/dag/service.rs index e6bf1d69..005c76ec 100644 --- a/apalis-workflow/src/dag/service.rs +++ b/apalis-workflow/src/dag/service.rs @@ -1,9 +1,9 @@ use apalis_core::backend::codec::Codec; use apalis_core::backend::{self, BackendExt, TaskResult}; -use apalis_core::task; use apalis_core::task::builder::TaskBuilder; use apalis_core::task::metadata::Meta; use apalis_core::task::status::Status; +use apalis_core::task::{self, task_id}; use apalis_core::{ backend::{Backend, WaitForCompletion}, error::BoxDynError, @@ -22,222 +22,32 @@ use 
std::pin::Pin; use std::task::{Context, Poll}; use tower::Service; +use crate::dag::context::DagflowContext; +use crate::dag::response::DagExecutionResponse; use crate::id_generator::GenerateId; use crate::{DagExecutor, DagService}; -/// Metadata stored in each task for workflow processing -#[derive(Debug, Clone, Deserialize, Serialize, Default)] -pub struct DagflowContext { - /// The current node being executed in the DAG - pub current_node: NodeIndex, - - /// All nodes that have been completed in this execution - pub completed_nodes: HashSet, - - /// Map of node indices to their task IDs for result lookup - pub node_task_ids: HashMap>, - - /// Current position in the topological order - pub current_position: usize, - - /// Whether this is the initial execution - pub is_initial: bool, - - /// The original task ID that started this DAG execution - pub root_task_id: Option>, - - _phantom: std::marker::PhantomData, -} - -impl DagflowContext { - /// Create initial context for DAG execution - pub fn new(root_task_id: Option>) -> Self { - Self { - current_node: NodeIndex::new(0), - completed_nodes: HashSet::new(), - node_task_ids: HashMap::new(), - current_position: 0, - is_initial: true, - root_task_id, - _phantom: std::marker::PhantomData, - } - } - - /// Mark a node as completed and store its task ID - pub fn mark_completed(&mut self, node: NodeIndex, task_id: TaskId) { - self.completed_nodes.insert(node); - self.node_task_ids.insert(node, task_id); - } - - /// Check if all dependencies of a node are completed - pub fn are_dependencies_complete(&self, dependencies: &[NodeIndex]) -> bool { - dependencies - .iter() - .all(|dep| self.completed_nodes.contains(dep)) - } - - /// Check if the DAG execution is complete - pub fn is_complete(&self, end_nodes: &Vec) -> bool { - end_nodes - .iter() - .all(|node| self.completed_nodes.contains(node)) - } - - /// Get task IDs for dependencies of a given node - pub fn get_dependency_task_ids(&self, dependencies: &[NodeIndex]) -> Vec> { - dependencies - .iter() - .filter_map(|dep| self.node_task_ids.get(dep).cloned()) - .collect() - } -} - -/// Response from DAG execution step -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum DagExecutionResponse { - InitializeFanIn { - input: Compact, - context: DagflowContext, - }, - EnqueueNext { - result: Compact, - context: DagflowContext, - }, - /// Waiting for dependencies to complete - WaitingForDependencies { - node: NodeIndex, - pending_dependencies: Vec, - context: DagflowContext, - }, - - /// DAG execution is complete - Complete { - end_node_task_ids: Vec>, - context: DagflowContext, - }, - - /// No more nodes can execute (shouldn't happen and should be an error) - Stuck { - context: DagflowContext, - }, -} - -impl Service> for DagExecutor +/// Service that manages the execution of a DAG workflow +pub struct RootDagService where B: BackendExt, - B::Context: - Send + Sync + 'static + MetadataExt> + Default, - B::IdType: Clone + Send + Sync + 'static + GenerateId, - B::Compact: Send + Sync + 'static, { - type Response = DagExecutionResponse; - type Error = BoxDynError; - type Future = Pin> + Send>>; - - fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { - loop { - // must wait for *all* services to be ready. - // this will cause head-of-line blocking unless the underlying services are always ready. - if self.not_ready.is_empty() { - return Poll::Ready(Ok(())); - } else { - if self - .graph - .node_weight_mut(self.not_ready[0]) - .unwrap() - .poll_ready(cx)? 
- .is_pending() - { - return Poll::Pending; - } - - self.not_ready.pop_front(); - } - } - } - - fn call(&mut self, req: Task) -> Self::Future { - // Clone what we need for the async block - let mut graph = self.graph.clone(); - let node_mapping = self.node_mapping.clone(); - let topological_order = self.topological_order.clone(); - let task_id = req.parts.task_id.as_ref().unwrap().clone(); - let start_nodes = self.start_nodes.clone(); - let end_nodes = self.end_nodes.clone(); - - Box::pin(async move { - let context_result = req.extract::>>(); - let mut context = match context_result.await { - Ok(ctx) => ctx.0, - Err(_) => { - if start_nodes.len() == 1 { - DagflowContext::new(req.parts.task_id.clone()) - } else { - println!("Initializing fan-in for multiple start nodes"); - return Ok(DagExecutionResponse::InitializeFanIn { - input: req.args, - context: DagflowContext::new(req.parts.task_id.clone()), - }); - } - } - }; - - // Check if execution is complete - if context.is_complete(&end_nodes) { - let end_task_ids = end_nodes - .iter() - .filter_map(|node| context.node_task_ids.get(node).cloned()) - .collect(); - - return Ok(DagExecutionResponse::Complete { - end_node_task_ids: end_task_ids, - context, - }); - } - - // Get dependencies for this node - let dependencies: Vec = graph - .neighbors_directed(context.current_node, Direction::Incoming) - .collect(); - - // Check if dependencies are ready - if !context.are_dependencies_complete(&dependencies) { - let pending: Vec = dependencies - .iter() - .copied() - .filter(|dep| !context.completed_nodes.contains(dep)) - .collect(); - - return Ok(DagExecutionResponse::WaitingForDependencies { - node: context.current_node, - pending_dependencies: pending, - context, - }); - } - - // Get the service for this node - let service = graph - .node_weight_mut(context.current_node) - .ok_or_else(|| BoxDynError::from("Node not found in graph"))?; - - let result = service.call(req).await.unwrap(); - - // Mark this node as completed (or in-progress) - context.mark_completed(context.current_node, task_id); - - Ok(DagExecutionResponse::EnqueueNext { result, context }) - }) - } + executor: DagExecutor, + backend: B, } -/// Service that manages the execution of a DAG workflow -pub struct RootDagService +impl std::fmt::Debug for RootDagService where B: BackendExt, { - executor: DagExecutor, - backend: B, + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RootDagService") + .field("executor", &"") + .field("backend", &"") + .finish() + } } + impl RootDagService where B: BackendExt, @@ -259,25 +69,23 @@ where } } -impl Service> for RootDagService +impl Service> + for RootDagService where B: BackendExt + Send + Sync + 'static + Clone + WaitForCompletion, - B::IdType: GenerateId + Send + Sync + 'static + PartialEq, + B::IdType: GenerateId + Send + Sync + 'static + PartialEq + Debug, B::Compact: Send + Sync + 'static + Clone, B::Context: - Send + Sync + Default + MetadataExt> + 'static, + Send + Sync + Default + MetadataExt, Error = MetaError> + 'static, Err: std::error::Error + Send + Sync + 'static, B: Sink, Error = Err> + Unpin, B::Codec: Codec, Compact = B::Compact, Error = CdcErr> + 'static - + Codec< - DagExecutionResponse, - Compact = B::Compact, - Error = CdcErr, - >, + + Codec, Compact = B::Compact, Error = CdcErr>, CdcErr: Into, + MetaError: Into + Send + Sync + 'static, { - type Response = DagExecutionResponse; + type Response = DagExecutionResponse; type Error = BoxDynError; type Future = BoxFuture<'static, Result>; @@ 
-288,215 +96,327 @@ where self.executor.poll_ready(cx).map_err(|e| e.into()) } - fn call(&mut self, req: Task) -> Self::Future { + fn call(&mut self, mut req: Task) -> Self::Future { let mut executor = self.executor.clone(); - let backend = self.backend.clone(); - + let mut backend = self.backend.clone(); + let start_nodes = executor.start_nodes.clone(); + let end_nodes = executor.end_nodes.clone(); async move { - let response = executor.call(req).await?; - match response { - DagExecutionResponse::EnqueueNext { - ref result, - ref context, - } => { - // Enqueue next tasks for downstream nodes - let mut graph = executor.graph.clone(); - let mut enqueue_futures = vec![]; - - for neighbor in executor + let ctx = req + .extract::>>() + .await; + let (response, context) = match ctx { + Ok(Meta(mut context)) => { + tracing::debug!( + task_id = ?req.parts.task_id, + node = ?context.current_node, + "Extracted DagflowContext for task" + ); + let incoming_nodes = executor .graph - .neighbors_directed(context.current_node, Direction::Outgoing) - { - let service = graph - .node_weight_mut(neighbor) - .ok_or_else(|| BoxDynError::from("Node not found in graph"))?; - - let dependencies: Vec = graph - .neighbors_directed(neighbor, Direction::Incoming) - .collect(); - - let dependency_task_ids = context.get_dependency_task_ids(&dependencies); - - let task = TaskBuilder::new(result.clone()) - .with_task_id(TaskId::new(B::IdType::generate())) - .meta(DagflowContext { - current_node: neighbor, - completed_nodes: context.completed_nodes.clone(), - node_task_ids: dependency_task_ids - .iter() - .enumerate() - .map(|(i, task_id)| (dependencies[i], task_id.clone())) - .collect(), - current_position: context.current_position + 1, - is_initial: false, - root_task_id: context.root_task_id.clone(), - _phantom: std::marker::PhantomData, - }) - .build(); - - let mut b = backend.clone(); - - enqueue_futures.push(async move { - b.send(task).await.map_err(|e| BoxDynError::from(e))?; - Ok::<(), BoxDynError>(()) - }); - } - - // Await all enqueue operations - futures::future::try_join_all(enqueue_futures).await?; - } - DagExecutionResponse::InitializeFanIn { - ref input, - ref context, - } => { - use apalis_core::backend::codec::Codec; - let values: Vec = - B::Codec::decode(input).map_err(|e: CdcErr| e.into())?; - let start_nodes = executor.start_nodes.clone(); - assert_eq!(values.len(), start_nodes.len()); - - let mut collector_tasks = vec![]; - - let mut enqueue_futures = vec![]; - for (node_input, start_node) in values.into_iter().zip(start_nodes) { - let task_id = TaskId::new(B::IdType::generate()); - let task = TaskBuilder::new(node_input) - .with_task_id(task_id.clone()) - .meta(DagflowContext { - current_node: start_node, - completed_nodes: Default::default(), - node_task_ids: Default::default(), - current_position: context.current_position, - is_initial: true, - root_task_id: context.root_task_id.clone(), - _phantom: std::marker::PhantomData, - }) - .build(); - let mut b = backend.clone(); - collector_tasks.push((start_node, task_id)); - enqueue_futures.push( - async move { - b.send(task).await.map_err(|e| BoxDynError::from(e))?; - Ok::<(), BoxDynError>(()) - } - .boxed(), - ); - } - let collector_future = { - let mut b = backend.clone(); - let graph = executor.graph.clone(); - let collector_tasks = collector_tasks.clone(); - - async move { - let res: Vec> = b - .wait_for( - collector_tasks.iter().map(|(_, task_id)| task_id.clone()), + .neighbors_directed(context.current_node, Direction::Incoming) + .collect::>(); + 
match incoming_nodes.len() { + // Entry node + 0 if start_nodes.len() == 1 => { + let response = executor.call(req).await?; + (response, context) + } + // Entry node with multiple start nodes + 0 if start_nodes.len() > 1 => { + let response = executor.call(req).await?; + (response, context) + } + // Single incoming node, proceed normally + 1 => { + let response = executor.call(req).await?; + (response, context) + } + // Multiple incoming nodes, fan-in scenario + _ => { + let dependency_task_ids = + context.get_dependency_task_ids(&incoming_nodes); + tracing::debug!( + task_id = ?req.parts.task_id, + prev_node = ?context.prev_node, + node = ?context.current_node, + deps = ?dependency_task_ids, + "Fanning in from multiple dependencies", + ); + let results = backend + .check_status( + dependency_task_ids.values().cloned().collect::>(), ) - .collect::>() - .await - .into_iter() - .collect::, _>>()?; - let outgoing_nodes = - graph.neighbors_directed(context.current_node, Direction::Outgoing); - for outgoing_node in outgoing_nodes { - let incoming_nodes = graph - .neighbors_directed(outgoing_node, Direction::Incoming) - .collect::>(); - - let all_good = res.iter().all(|r| matches!(r.status, Status::Done)); - - if !all_good { - return Err(BoxDynError::from( - "One or more collector tasks failed", - )); - } + .await?; + if (results.iter().all(|s| matches!(s.status, Status::Done))) { let sorted_results = { // Match the order of incoming_nodes by matching NodeIndex incoming_nodes .iter() .rev() .map(|node_index| { - let task_id = collector_tasks + let task_id = context.node_task_ids .iter() - .find(|(n, _)| n == node_index) + .find(|(n, _)| *n == node_index) .map(|(_, task_id)| task_id) .expect("TaskId for incoming node not found"); - res.iter().find(|r| &r.task_id == task_id).expect( + results.iter().find(|r| &r.task_id == task_id).expect( "TaskResult for incoming node's task_id not found", ) }) .collect::>() }; - - let args = sorted_results - .into_iter() - .map(|s| { - let inner = s.result.as_ref().unwrap(); - let decoded: DagExecutionResponse< - B::Compact, - B::Context, - B::IdType, - > = B::Codec::decode(inner) - .map_err(|e: CdcErr| e.into())?; - match decoded { - DagExecutionResponse::EnqueueNext { - result, - context, - } => Ok(result), - _ => Err(BoxDynError::from( - "Unexpected response type from collector task", - )), - } - }) - .collect::, BoxDynError>>()?; - let mut completed_nodes = collector_tasks - .iter() - .map(|(node, _)| *node) - .collect::>(); - completed_nodes.insert(context.current_node); - - let task = TaskBuilder::new( - B::Codec::encode(&args).map_err(|e| e.into())?, + let encoded_input = B::Codec::encode( + &sorted_results + .iter() + .map(|s| match &s.result { + Ok(val) => { + let decoded: DagExecutionResponse< + B::Compact, + B::IdType, + > = B::Codec::decode(val).map_err(|e: CdcErr| { + format!( + "Failed to decode dependency result: {:?}", + e.into() + ) + })?; + match decoded { + DagExecutionResponse::FanOut { response } => { + return Ok(response); + } + DagExecutionResponse::EnqueuedNext { result } => { + return Ok(result); + } + // DagExecutionResponse::Complete { result } => { + // Ok(result) + // } + _ => Err(format!( + "Dependency task returned Complete response, which is unexpected during fan-in" + )) + } + } + Err(e) => { + return Err(format!( + "Dependency task failed: {:?}", + e + )); + } + }) + .collect::, String>>()?, ) - .with_task_id(TaskId::new(B::IdType::generate())) - .meta(DagflowContext { - current_node: outgoing_node, - completed_nodes, - 
node_task_ids: collector_tasks.clone().into_iter().collect(), - current_position: context.current_position + 1, - is_initial: false, - root_task_id: context.root_task_id.clone(), - _phantom: std::marker::PhantomData, - }) - .build(); - b.send(task).await.map_err(|e| BoxDynError::from(e))?; + .map_err(|e| e.into())?; + let req = req.map(|args| encoded_input); // Replace args with fan-in input + let response = executor.call(req).await?; + (response, context) + } else { + return Ok(DagExecutionResponse::WaitingForDependencies { + pending_dependencies: dependency_task_ids, + }); } + } + } + } - println!("Collector results enqueued for next nodes"); - - Ok::<(), BoxDynError>(()) + Err(e) => { + tracing::debug!( + task_id = ?req.parts.task_id, + "Extracted DagflowContext for task without meta" + ); + // if no metadata, we assume its an entry task + match start_nodes.len() { + 1 => { + tracing::debug!(task_id = ?req.parts.task_id, "Single start node detected, proceeding with execution"); + let context = DagflowContext::new(req.parts.task_id.clone()); + let task_id = req.parts.task_id.clone(); + req.parts + .ctx + .inject(context.clone()) + .map_err(|e| e.into())?; + let response = executor.call(req).await?; + tracing::debug!(node = ?context.current_node, task_id = ?task_id, "Execution complete at node"); + (response, context) + } + _ => { + let new_node_task_ids = fan_out_entry_nodes( + &executor, + &mut backend, + &mut DagflowContext::new(req.parts.task_id.clone()), + &req.args, + ) + .await?; + return Ok(DagExecutionResponse::EntryFanOut { + node_task_ids: new_node_task_ids, + }); } } - .boxed(); - // Await all enqueue operations - enqueue_futures.push(collector_future); - futures::future::try_join_all(enqueue_futures).await?; } - _ => { /* No action needed for other variants */ } + }; + // At this point we know a node was executed and we have its context + // We need to figure out the outgoing nodes and enqueue tasks for them + let current_node = context.current_node; + let outgoing_nodes = executor + .graph + .neighbors_directed(current_node, Direction::Outgoing) + .collect::>(); + + match outgoing_nodes.len() { + 0 => { + // This was an end node + return Ok(DagExecutionResponse::Complete { result: response }); + } + 1 => { + // Single outgoing node, enqueue task for it + let next_node = outgoing_nodes[0]; + let mut new_context = context.clone(); + new_context.prev_node = Some(current_node); + new_context.current_node = next_node; + new_context.current_position += 1; + new_context.is_initial = false; + + let task = TaskBuilder::new(response.clone()) + .with_task_id(TaskId::new(B::IdType::generate())) + .meta(new_context) + .build(); + backend.send(task).await.map_err(|e| BoxDynError::from(e))?; + } + _ => { + // Multiple outgoing nodes, fan out + let mut new_context = context.clone(); + new_context.prev_node = Some(current_node); + new_context.current_position += 1; + new_context.is_initial = false; + + let next_task_ids = fan_out_next_nodes( + &executor, + outgoing_nodes, + &mut backend, + &mut new_context, + &response, + ) + .await?; + return Ok(DagExecutionResponse::FanOut { + response, + }); + } } - Ok(response) + Ok(DagExecutionResponse::EnqueuedNext { result: response }) } .boxed() } } -#[cfg(test)] -mod tests { - use super::*; +async fn fan_out_next_nodes( + executor: &DagExecutor, + outgoing_nodes: Vec, + backend: &mut B, + context: &mut DagflowContext, + input: &B::Compact, +) -> Result>, BoxDynError> +where + B::IdType: GenerateId + Send + Sync + 'static + PartialEq + Debug, + 
B::Compact: Send + Sync + 'static + Clone, + B::Context: Send + Sync + Default + MetadataExt> + 'static, + B: Sink, Error = Err> + Unpin, + Err: std::error::Error + Send + Sync + 'static, + B: BackendExt + Send + Sync + 'static + Clone + WaitForCompletion, + B::Codec: Codec, Compact = B::Compact, Error = CdcErr>, + CdcErr: Into, +{ + let mut enqueue_futures = vec![]; + let next_nodes = outgoing_nodes + .iter() + .map(|node| (*node, TaskId::new(B::IdType::generate()))) + .collect::>>(); + let mut node_task_ids = next_nodes.clone(); + node_task_ids.extend(context.node_task_ids.clone()); + for outgoing_node in outgoing_nodes.into_iter() { + let task_id = next_nodes + .get(&outgoing_node) + .expect("TaskId for start node not found") + .clone(); + let task = TaskBuilder::new(input.clone()) + .with_task_id(task_id.clone()) + .meta(DagflowContext { + prev_node: context.prev_node.clone(), + current_node: outgoing_node, + completed_nodes: context.completed_nodes.clone(), + node_task_ids: node_task_ids.clone(), + current_position: context.current_position + 1, + is_initial: context.is_initial, + root_task_id: context.root_task_id.clone(), + }) + .build(); + let mut b = backend.clone(); + enqueue_futures.push( + async move { + b.send(task).await.map_err(|e| BoxDynError::from(e))?; + Ok::<(), BoxDynError>(()) + } + .boxed(), + ); + } + futures::future::try_join_all(enqueue_futures).await?; + Ok(next_nodes) +} - #[tokio::test] - async fn test_dag_executor_service() { - // This would test the Service implementation - // You would create a DagExecutor, create a Task, and call the service +async fn fan_out_entry_nodes( + executor: &DagExecutor, + backend: &mut B, + context: &mut DagflowContext, + input: &B::Compact, +) -> Result>, BoxDynError> +where + B::IdType: GenerateId + Send + Sync + 'static + PartialEq + Debug, + B::Compact: Send + Sync + 'static + Clone, + B::Context: Send + Sync + Default + MetadataExt> + 'static, + B: Sink, Error = Err> + Unpin, + Err: std::error::Error + Send + Sync + 'static, + B: BackendExt + Send + Sync + 'static + Clone + WaitForCompletion, + B::Codec: Codec, Compact = B::Compact, Error = CdcErr>, + CdcErr: Into, +{ + let values: Vec = B::Codec::decode(input).map_err(|e: CdcErr| e.into())?; + let start_nodes = executor.start_nodes.clone(); + if (values.len() != start_nodes.len()) { + return Err(BoxDynError::from(format!( + "Expected {} inputs for fan-in, got {}", + start_nodes.len(), + values.len() + ))); + } + let mut enqueue_futures = vec![]; + let next_nodes = start_nodes + .iter() + .map(|node| (*node, TaskId::new(B::IdType::generate()))) + .collect::>>(); + let mut node_task_ids = next_nodes.clone(); + node_task_ids.extend(context.node_task_ids.clone()); + for (outgoing_node, input) in start_nodes.into_iter().zip(values) { + let task_id = next_nodes + .get(&outgoing_node) + .expect("TaskId for start node not found") + .clone(); + let task = TaskBuilder::new(input) + .with_task_id(task_id.clone()) + .meta(DagflowContext { + prev_node: None, + current_node: outgoing_node, + completed_nodes: Default::default(), + node_task_ids: node_task_ids.clone(), + current_position: context.current_position, + is_initial: true, + root_task_id: context.root_task_id.clone(), + }) + .build(); + let mut b = backend.clone(); + enqueue_futures.push( + async move { + b.send(task).await.map_err(|e| BoxDynError::from(e))?; + Ok::<(), BoxDynError>(()) + } + .boxed(), + ); } + futures::future::try_join_all(enqueue_futures).await?; + Ok(next_nodes) } diff --git 
a/apalis-workflow/src/filter_map/mod.rs b/apalis-workflow/src/filter_map/mod.rs index be2b4769..f047ebb6 100644 --- a/apalis-workflow/src/filter_map/mod.rs +++ b/apalis-workflow/src/filter_map/mod.rs @@ -257,7 +257,8 @@ where + WaitForCompletion>> + Unpin, F: Service, Error = BoxDynError, Response = Option> - + Send + Sync + + Send + + Sync + 'static + Clone, S: Step, B>, diff --git a/apalis-workflow/src/fold/mod.rs b/apalis-workflow/src/fold/mod.rs index b7a65056..652ab94c 100644 --- a/apalis-workflow/src/fold/mod.rs +++ b/apalis-workflow/src/fold/mod.rs @@ -70,7 +70,8 @@ impl, Init, B, MetaErr, Err, CodecErr for FoldStep where F: Service, Response = Init> - + Send + Sync + + Send + + Sync + 'static + Clone, S: Step, diff --git a/apalis-workflow/src/lib.rs b/apalis-workflow/src/lib.rs index 83b777a0..c9c64aa8 100644 --- a/apalis-workflow/src/lib.rs +++ b/apalis-workflow/src/lib.rs @@ -15,8 +15,7 @@ type BoxedService = tower::util::BoxCloneSyncService = BoxedService, GoTo>>; -type DagService = - BoxedService, Compact>; +type DagService = BoxedService, Compact>; /// combinator for sequential workflow execution. pub mod and_then; @@ -45,7 +44,7 @@ pub mod step; /// workflow definitions. pub mod workflow; -pub use {dag::DagExecutor, dag::DagFlow, sink::WorkflowSink, workflow::Workflow}; +pub use {dag::DagFlow, dag::executor::DagExecutor, sink::WorkflowSink, workflow::Workflow}; #[cfg(test)] mod tests { diff --git a/utils/apalis-file-storage/src/util.rs b/utils/apalis-file-storage/src/util.rs index ba69970f..26f5e6bd 100644 --- a/utils/apalis-file-storage/src/util.rs +++ b/utils/apalis-file-storage/src/util.rs @@ -187,6 +187,14 @@ where status: Status::Pending, }; if let Some(value) = self.get(&key) { + if value.result.is_none() { + results.push(apalis_core::backend::TaskResult { + task_id: task_id.clone(), + status: Status::Pending, + result: Err(format!("Task still pending")), + }); + continue; + } let result = match serde_json::from_value::>(value.result.unwrap()) { Ok(result) => apalis_core::backend::TaskResult { From 7c6f11aa694ef76d7349a0d118e522c94f9806a6 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Mon, 22 Dec 2025 19:26:41 +0300 Subject: [PATCH 04/12] chore: add more checks and fixes --- apalis-core/src/backend/impls/dequeue.rs | 1 + apalis-core/src/backend/impls/mod.rs | 6 ++-- apalis-core/src/backend/mod.rs | 1 + apalis-workflow/src/dag/service.rs | 41 +++++++++++++++--------- 4 files changed, 31 insertions(+), 18 deletions(-) diff --git a/apalis-core/src/backend/impls/dequeue.rs b/apalis-core/src/backend/impls/dequeue.rs index 91d2da5d..db158fe8 100644 --- a/apalis-core/src/backend/impls/dequeue.rs +++ b/apalis-core/src/backend/impls/dequeue.rs @@ -1,3 +1,4 @@ +#![cfg(feature = "sleep")] use std::{ collections::VecDeque, fmt, diff --git a/apalis-core/src/backend/impls/mod.rs b/apalis-core/src/backend/impls/mod.rs index b79c4343..dc5e25dc 100644 --- a/apalis-core/src/backend/impls/mod.rs +++ b/apalis-core/src/backend/impls/mod.rs @@ -1,4 +1,6 @@ -pub(super) mod dequeue; +/// Backend implementation based on VecDeque +pub(crate) mod dequeue; /// A guide to using the implementing a backend pub mod guide; -pub(super) mod memory; +/// In-memory backend implementation +pub(crate) mod memory; diff --git a/apalis-core/src/backend/mod.rs b/apalis-core/src/backend/mod.rs index 227ed174..b779f0e9 100644 --- a/apalis-core/src/backend/mod.rs +++ b/apalis-core/src/backend/mod.rs @@ -51,6 +51,7 @@ pub mod memory { } /// In-memory dequeue backend +#[cfg(feature = "sleep")] pub mod dequeue { 
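    // Re-exports the `VecDeque`-based implementation; compiled only when the `sleep` feature is enabled.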
pub use crate::backend::impls::dequeue::*; } diff --git a/apalis-workflow/src/dag/service.rs b/apalis-workflow/src/dag/service.rs index 005c76ec..8542dd27 100644 --- a/apalis-workflow/src/dag/service.rs +++ b/apalis-workflow/src/dag/service.rs @@ -107,8 +107,8 @@ where .await; let (response, context) = match ctx { Ok(Meta(mut context)) => { + #[cfg(feature = "tracing")] tracing::debug!( - task_id = ?req.parts.task_id, node = ?context.current_node, "Extracted DagflowContext for task" ); @@ -136,8 +136,8 @@ where _ => { let dependency_task_ids = context.get_dependency_task_ids(&incoming_nodes); + #[cfg(feature = "tracing")] tracing::debug!( - task_id = ?req.parts.task_id, prev_node = ?context.prev_node, node = ?context.current_node, deps = ?dependency_task_ids, @@ -151,7 +151,7 @@ where if (results.iter().all(|s| matches!(s.status, Status::Done))) { let sorted_results = { // Match the order of incoming_nodes by matching NodeIndex - incoming_nodes + let res =incoming_nodes .iter() .rev() .map(|node_index| { @@ -159,12 +159,19 @@ where .iter() .find(|(n, _)| *n == node_index) .map(|(_, task_id)| task_id) - .expect("TaskId for incoming node not found"); - results.iter().find(|r| &r.task_id == task_id).expect( - "TaskResult for incoming node's task_id not found", - ) + .ok_or(BoxDynError::from("TaskId for incoming node not found"))?; + let task_result = results.iter().find(|r| &r.task_id == task_id).ok_or( + BoxDynError::from(format!( + "TaskResult for task_id {:?} not found", + task_id + )))?; + Ok(task_result) }) - .collect::>() + .collect::, BoxDynError>>(); + match res { + Ok(v) => v, + Err(e) => return Ok(DagExecutionResponse::WaitingForDependencies { pending_dependencies: dependency_task_ids }), + } }; let encoded_input = B::Codec::encode( &sorted_results @@ -187,11 +194,11 @@ where DagExecutionResponse::EnqueuedNext { result } => { return Ok(result); } - // DagExecutionResponse::Complete { result } => { - // Ok(result) - // } + DagExecutionResponse::Complete { result } => { + Ok(result) + } _ => Err(format!( - "Dependency task returned Complete response, which is unexpected during fan-in" + "Dependency task returned invalid response, which is unexpected during fan-in" )) } } @@ -218,14 +225,15 @@ where } Err(e) => { + #[cfg(feature = "tracing")] tracing::debug!( - task_id = ?req.parts.task_id, - "Extracted DagflowContext for task without meta" + "Extracting DagflowContext for task without meta" ); // if no metadata, we assume its an entry task match start_nodes.len() { 1 => { - tracing::debug!(task_id = ?req.parts.task_id, "Single start node detected, proceeding with execution"); + #[cfg(feature = "tracing")] + tracing::debug!("Single start node detected, proceeding with execution"); let context = DagflowContext::new(req.parts.task_id.clone()); let task_id = req.parts.task_id.clone(); req.parts @@ -233,7 +241,8 @@ where .inject(context.clone()) .map_err(|e| e.into())?; let response = executor.call(req).await?; - tracing::debug!(node = ?context.current_node, task_id = ?task_id, "Execution complete at node"); + #[cfg(feature = "tracing")] + tracing::debug!(node = ?context.current_node, "Execution complete at node"); (response, context) } _ => { From 7a4d63122ebf4925187b82a245eb68f45cd70dab Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Mon, 22 Dec 2025 20:03:27 +0300 Subject: [PATCH 05/12] chore: fix wrong persist call --- Cargo.toml | 2 +- utils/apalis-file-storage/src/lib.rs | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 
b3b9536d..c81025b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -62,4 +62,4 @@ verbose_file_reads = "warn" [workspace.metadata.cargo-udeps.ignore] normal = ["metrics-exporter-prometheus", "document-features"] -development = ["apalis-workflow"] +development = ["apalis-workflow", "apalis-file-storage"] diff --git a/utils/apalis-file-storage/src/lib.rs b/utils/apalis-file-storage/src/lib.rs index dddaffc2..677851fe 100644 --- a/utils/apalis-file-storage/src/lib.rs +++ b/utils/apalis-file-storage/src/lib.rs @@ -191,14 +191,13 @@ impl JsonStorage { /// Persist all current data to disk by rewriting the file fn persist_to_disk(&self) -> std::io::Result<()> { - let tmp_path = self.path.with_extension("tmp"); - + let tmp_path = &self.path; { let tmp_file = OpenOptions::new() .write(true) .create(true) .truncate(true) - .open(&tmp_path)?; + .open(tmp_path)?; let mut writer = BufWriter::new(tmp_file); for (key, value) in self.tasks.try_read().unwrap().iter() { From 484e7e5ebc7c22caac43c760f0099b00e46b2522 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Mon, 22 Dec 2025 20:05:12 +0300 Subject: [PATCH 06/12] changelog: entry --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7423edbb..4effeda6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ All notable changes to this project are documented in this file. ## [Unreleased] +- **chore**: bump: introducing rc.1 ([#646](https://github.com/geofmureithi/apalis/pull/646)) - **chore**: feat: refactor and granulize traits ([#586](https://github.com/geofmureithi/apalis/pull/586)) - **refactor**: refactor: crates, workflow and BackendExt ([#623](https://github.com/geofmureithi/apalis/pull/623)) - **chore**: bump to v1.0.0 beta.1 ([#624](https://github.com/geofmureithi/apalis/pull/624)) @@ -19,6 +20,7 @@ All notable changes to this project are documented in this file. - **crates**: Moved backend crates to respective repos ([#586](https://github.com/geofmureithi/apalis/pull/586)) - **api**: `Backend` must be the second input in `WorkerBuilder` ([#586](https://github.com/geofmureithi/apalis/pull/586)) + ```rust let worker = WorkerBuilder::new("tasty-banana") .backend(sqlite) @@ -27,14 +29,18 @@ let worker = WorkerBuilder::new("tasty-banana") // .data .build(task_fn); ``` + - **api**: `Monitor` supports restarts and factory() becomes factory(usize) ([#586](https://github.com/geofmureithi/apalis/pull/586)) + ```rust Monitor::new() .register({ WorkerBuilder::new("tasty-banana") .... 
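        // With this old shape, `register` took an already built worker, so the
        // Monitor could not rebuild it after a restart; `factory(usize)` now
        // hands the run count to a closure that rebuilds the worker.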
``` + Becomes + ```rust Monitor::new() .register(|runs: usize| { From db60a2addc2b203ad8e8b944da5ad455a5f28a7f Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Mon, 22 Dec 2025 20:13:35 +0300 Subject: [PATCH 07/12] chore: vet and update deps --- Cargo.lock | 52 +++---- supply-chain/config.toml | 296 ++++++++++---------------------------- supply-chain/imports.lock | 8 ++ 3 files changed, 108 insertions(+), 248 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8377ebb3..86478b21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -178,9 +178,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.15.1" +version = "1.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b5ce75405893cd713f9ab8e297d8e438f624dde7d706108285f7e17a25a180f" +checksum = "6a88aab2464f1f25453baa7a07c84c5b7684e274054ba06817f382357f77a288" dependencies = [ "aws-lc-sys", "zeroize", @@ -188,9 +188,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.34.0" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "179c3777a8b5e70e90ea426114ffc565b2c1a9f82f6c4a0c5a34aa6ef5e781b6" +checksum = "b45afffdee1e7c9126814751f88dddc747f41d91da16c9551a0f1e8a11e788a1" dependencies = [ "cc", "cmake", @@ -326,9 +326,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.0" +version = "3.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" +checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" [[package]] name = "byteorder" @@ -357,9 +357,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.49" +version = "1.2.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" +checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c" dependencies = [ "find-msvc-tools", "jobserver", @@ -395,9 +395,9 @@ dependencies = [ [[package]] name = "cmake" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b042e5d8a74ae91bb0961acd039822472ec99f8ab0948cbf6d1369588f8be586" +checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" dependencies = [ "cc", ] @@ -1195,9 +1195,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010" [[package]] name = "jobserver" @@ -1733,9 +1733,9 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "portable-atomic" -version = "1.11.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" +checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd" [[package]] name = "potential_utf" @@ -1935,9 +1935,9 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.12.26" +version = "0.12.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b4c14b2d9afca6a60277086b0cc6a6ae0b568f6f7916c943a8cdc79f8be240f" +checksum = 
"8e893f6bece5953520ddbb3f8f46f3ef36dd1fef4ee9b087c4b4a725fd5d10e4" dependencies = [ "base64", "bytes", @@ -2077,9 +2077,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" +checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" dependencies = [ "zeroize", ] @@ -2104,9 +2104,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea" [[package]] name = "schannel" @@ -2363,9 +2363,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.145" +version = "1.0.146" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" +checksum = "217ca874ae0207aac254aa02c957ded05585a90892cc8d87f9e5fa49669dadd8" dependencies = [ "itoa", "memchr", @@ -2757,9 +2757,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "log", "pin-project-lite", @@ -2780,9 +2780,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.35" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", "valuable", diff --git a/supply-chain/config.toml b/supply-chain/config.toml index 832194fa..49d7ee4f 100644 --- a/supply-chain/config.toml +++ b/supply-chain/config.toml @@ -32,38 +32,30 @@ criteria = "safe-to-deploy" version = "1.1.4" criteria = "safe-to-deploy" -[[exemptions.aligned-vec]] -version = "0.6.4" -criteria = "safe-to-run" - [[exemptions.android_system_properties]] version = "0.1.5" criteria = "safe-to-deploy" -[[exemptions.anes]] -version = "0.1.6" -criteria = "safe-to-run" - -[[exemptions.anstyle]] -version = "1.0.13" -criteria = "safe-to-run" - [[exemptions.anyhow]] version = "1.0.100" criteria = "safe-to-deploy" +[[exemptions.apalis]] +version = "1.0.0-beta.2" +criteria = "safe-to-deploy" + [[exemptions.apalis-core]] version = "1.0.0-rc.1" criteria = "safe-to-deploy" +[[exemptions.apalis-sql]] +version = "1.0.0-beta.2" +criteria = "safe-to-deploy" + [[exemptions.apalis-workflow]] version = "0.1.0-rc.1" criteria = "safe-to-deploy" -[[exemptions.arrayvec]] -version = "0.7.6" -criteria = "safe-to-run" - [[exemptions.async-trait]] version = "0.1.89" criteria = "safe-to-deploy" @@ -77,11 +69,11 @@ version = "1.5.0" criteria = "safe-to-deploy" [[exemptions.aws-lc-rs]] -version = "1.15.1" +version = "1.15.2" criteria = "safe-to-deploy" [[exemptions.aws-lc-sys]] -version = "0.34.0" +version = "0.35.0" criteria = "safe-to-deploy" [[exemptions.axum]] @@ -100,6 +92,14 @@ criteria = "safe-to-deploy" version = "0.22.1" criteria = "safe-to-deploy" 
+[[exemptions.bincode]] +version = "2.0.1" +criteria = "safe-to-deploy" + +[[exemptions.bincode_derive]] +version = "2.0.1" +criteria = "safe-to-deploy" + [[exemptions.bitflags]] version = "1.3.2" criteria = "safe-to-deploy" @@ -113,13 +113,9 @@ version = "0.6.2" criteria = "safe-to-deploy" [[exemptions.bumpalo]] -version = "3.19.0" +version = "3.19.1" criteria = "safe-to-deploy" -[[exemptions.bytemuck]] -version = "1.24.0" -criteria = "safe-to-run" - [[exemptions.byteorder]] version = "1.5.0" criteria = "safe-to-deploy" @@ -128,12 +124,8 @@ criteria = "safe-to-deploy" version = "1.11.0" criteria = "safe-to-deploy" -[[exemptions.cast]] -version = "0.3.0" -criteria = "safe-to-run" - [[exemptions.cc]] -version = "1.2.47" +version = "1.2.50" criteria = "safe-to-deploy" [[exemptions.cfg-if]] @@ -148,32 +140,8 @@ criteria = "safe-to-deploy" version = "0.4.42" criteria = "safe-to-deploy" -[[exemptions.ciborium]] -version = "0.2.2" -criteria = "safe-to-run" - -[[exemptions.ciborium-io]] -version = "0.2.2" -criteria = "safe-to-run" - -[[exemptions.ciborium-ll]] -version = "0.2.2" -criteria = "safe-to-run" - -[[exemptions.clap]] -version = "4.5.53" -criteria = "safe-to-run" - -[[exemptions.clap_builder]] -version = "4.5.53" -criteria = "safe-to-run" - -[[exemptions.clap_lex]] -version = "0.7.6" -criteria = "safe-to-run" - [[exemptions.cmake]] -version = "0.1.54" +version = "0.1.57" criteria = "safe-to-deploy" [[exemptions.core-foundation]] @@ -188,22 +156,6 @@ criteria = "safe-to-deploy" version = "0.8.7" criteria = "safe-to-deploy" -[[exemptions.cpp_demangle]] -version = "0.4.5" -criteria = "safe-to-run" - -[[exemptions.criterion]] -version = "0.7.0" -criteria = "safe-to-run" - -[[exemptions.criterion-plot]] -version = "0.6.0" -criteria = "safe-to-run" - -[[exemptions.crossbeam-deque]] -version = "0.8.6" -criteria = "safe-to-run" - [[exemptions.crossbeam-epoch]] version = "0.9.18" criteria = "safe-to-deploy" @@ -212,10 +164,6 @@ criteria = "safe-to-deploy" version = "0.8.21" criteria = "safe-to-deploy" -[[exemptions.crunchy]] -version = "0.2.4" -criteria = "safe-to-run" - [[exemptions.debugid]] version = "0.8.0" criteria = "safe-to-deploy" @@ -240,10 +188,6 @@ criteria = "safe-to-deploy" version = "1.0.5" criteria = "safe-to-deploy" -[[exemptions.either]] -version = "1.15.0" -criteria = "safe-to-run" - [[exemptions.email_address]] version = "0.2.9" criteria = "safe-to-deploy" @@ -252,14 +196,6 @@ criteria = "safe-to-deploy" version = "0.10.2" criteria = "safe-to-deploy" -[[exemptions.equator]] -version = "0.4.2" -criteria = "safe-to-run" - -[[exemptions.equator-macro]] -version = "0.4.2" -criteria = "safe-to-run" - [[exemptions.equivalent]] version = "1.0.2" criteria = "safe-to-deploy" @@ -292,6 +228,10 @@ criteria = "safe-to-deploy" version = "0.1.5" criteria = "safe-to-deploy" +[[exemptions.foldhash]] +version = "0.2.0" +criteria = "safe-to-deploy" + [[exemptions.foreign-types]] version = "0.3.2" criteria = "safe-to-deploy" @@ -364,10 +304,6 @@ criteria = "safe-to-deploy" version = "0.4.12" criteria = "safe-to-deploy" -[[exemptions.half]] -version = "2.7.1" -criteria = "safe-to-run" - [[exemptions.hashbrown]] version = "0.15.5" criteria = "safe-to-deploy" @@ -389,7 +325,7 @@ version = "0.4.3" criteria = "safe-to-deploy" [[exemptions.hostname]] -version = "0.4.1" +version = "0.4.2" criteria = "safe-to-deploy" [[exemptions.http]] @@ -445,7 +381,7 @@ version = "0.6.0" criteria = "safe-to-deploy" [[exemptions.hyper-util]] -version = "0.1.18" +version = "0.1.19" criteria = "safe-to-deploy" 
[[exemptions.iana-time-zone]] @@ -473,11 +409,11 @@ version = "2.1.1" criteria = "safe-to-deploy" [[exemptions.icu_properties]] -version = "2.1.1" +version = "2.1.2" criteria = "safe-to-deploy" [[exemptions.icu_properties_data]] -version = "2.1.1" +version = "2.1.2" criteria = "safe-to-deploy" [[exemptions.icu_provider]] @@ -496,10 +432,6 @@ criteria = "safe-to-deploy" version = "2.12.1" criteria = "safe-to-deploy" -[[exemptions.inferno]] -version = "0.11.21" -criteria = "safe-to-run" - [[exemptions.ipnet]] version = "2.11.0" criteria = "safe-to-deploy" @@ -512,12 +444,8 @@ criteria = "safe-to-deploy" version = "0.4.17" criteria = "safe-to-deploy" -[[exemptions.itertools]] -version = "0.13.0" -criteria = "safe-to-run" - [[exemptions.itoa]] -version = "1.0.15" +version = "1.0.16" criteria = "safe-to-deploy" [[exemptions.jobserver]] @@ -525,7 +453,7 @@ version = "0.1.34" criteria = "safe-to-deploy" [[exemptions.js-sys]] -version = "0.3.82" +version = "0.3.83" criteria = "safe-to-deploy" [[exemptions.lazy_static]] @@ -533,7 +461,7 @@ version = "1.5.0" criteria = "safe-to-deploy" [[exemptions.libc]] -version = "0.2.177" +version = "0.2.178" criteria = "safe-to-deploy" [[exemptions.linux-raw-sys]] @@ -553,7 +481,7 @@ version = "0.4.14" criteria = "safe-to-deploy" [[exemptions.log]] -version = "0.4.28" +version = "0.4.29" criteria = "safe-to-deploy" [[exemptions.matchers]] @@ -568,12 +496,8 @@ criteria = "safe-to-deploy" version = "2.7.6" criteria = "safe-to-deploy" -[[exemptions.memmap2]] -version = "0.9.9" -criteria = "safe-to-run" - [[exemptions.metrics]] -version = "0.24.2" +version = "0.24.3" criteria = "safe-to-deploy" [[exemptions.metrics-exporter-prometheus]] @@ -581,7 +505,7 @@ version = "0.17.2" criteria = "safe-to-deploy" [[exemptions.metrics-util]] -version = "0.20.0" +version = "0.20.1" criteria = "safe-to-deploy" [[exemptions.mime]] @@ -593,17 +517,13 @@ version = "0.8.9" criteria = "safe-to-deploy" [[exemptions.mio]] -version = "1.1.0" +version = "1.1.1" criteria = "safe-to-deploy" [[exemptions.native-tls]] version = "0.2.14" criteria = "safe-to-deploy" -[[exemptions.nix]] -version = "0.26.4" -criteria = "safe-to-run" - [[exemptions.nix]] version = "0.30.1" criteria = "safe-to-deploy" @@ -616,10 +536,6 @@ criteria = "safe-to-deploy" version = "0.1.0" criteria = "safe-to-deploy" -[[exemptions.num-format]] -version = "0.4.4" -criteria = "safe-to-run" - [[exemptions.num-traits]] version = "0.2.19" criteria = "safe-to-deploy" @@ -688,10 +604,6 @@ criteria = "safe-to-deploy" version = "1.21.3" criteria = "safe-to-deploy" -[[exemptions.oorandom]] -version = "11.1.5" -criteria = "safe-to-run" - [[exemptions.openssl]] version = "0.10.75" criteria = "safe-to-deploy" @@ -709,7 +621,7 @@ version = "0.9.111" criteria = "safe-to-deploy" [[exemptions.os_info]] -version = "3.13.0" +version = "3.14.0" criteria = "safe-to-deploy" [[exemptions.parking_lot]] @@ -720,6 +632,10 @@ criteria = "safe-to-deploy" version = "0.9.12" criteria = "safe-to-deploy" +[[exemptions.paste]] +version = "1.0.15" +criteria = "safe-to-deploy" + [[exemptions.percent-encoding]] version = "2.3.2" criteria = "safe-to-deploy" @@ -748,20 +664,8 @@ criteria = "safe-to-deploy" version = "0.3.32" criteria = "safe-to-deploy" -[[exemptions.plotters]] -version = "0.3.7" -criteria = "safe-to-run" - -[[exemptions.plotters-backend]] -version = "0.3.7" -criteria = "safe-to-run" - -[[exemptions.plotters-svg]] -version = "0.3.7" -criteria = "safe-to-run" - [[exemptions.portable-atomic]] -version = "1.11.1" +version = "1.12.0" criteria = 
"safe-to-deploy" [[exemptions.potential_utf]] @@ -772,10 +676,6 @@ criteria = "safe-to-deploy" version = "0.2.0" criteria = "safe-to-deploy" -[[exemptions.pprof]] -version = "0.15.0" -criteria = "safe-to-run" - [[exemptions.ppv-lite86]] version = "0.2.21" criteria = "safe-to-deploy" @@ -788,10 +688,6 @@ criteria = "safe-to-deploy" version = "0.12.6" criteria = "safe-to-deploy" -[[exemptions.quick-xml]] -version = "0.26.0" -criteria = "safe-to-run" - [[exemptions.quote]] version = "1.0.42" criteria = "safe-to-deploy" @@ -832,14 +728,6 @@ criteria = "safe-to-deploy" version = "11.6.0" criteria = "safe-to-deploy" -[[exemptions.rayon]] -version = "1.11.0" -criteria = "safe-to-run" - -[[exemptions.rayon-core]] -version = "1.13.0" -criteria = "safe-to-run" - [[exemptions.redox_syscall]] version = "0.5.18" criteria = "safe-to-deploy" @@ -857,17 +745,21 @@ version = "0.8.8" criteria = "safe-to-deploy" [[exemptions.reqwest]] -version = "0.12.24" +version = "0.12.27" criteria = "safe-to-deploy" -[[exemptions.rgb]] -version = "0.8.52" -criteria = "safe-to-run" - [[exemptions.ring]] version = "0.17.14" criteria = "safe-to-deploy" +[[exemptions.rmp]] +version = "0.8.14" +criteria = "safe-to-deploy" + +[[exemptions.rmp-serde]] +version = "1.3.0" +criteria = "safe-to-deploy" + [[exemptions.rustc-demangle]] version = "0.1.26" criteria = "safe-to-deploy" @@ -889,7 +781,7 @@ version = "0.8.2" criteria = "safe-to-deploy" [[exemptions.rustls-pki-types]] -version = "1.13.0" +version = "1.13.2" criteria = "safe-to-deploy" [[exemptions.rustls-webpki]] @@ -901,13 +793,9 @@ version = "1.0.22" criteria = "safe-to-deploy" [[exemptions.ryu]] -version = "1.0.20" +version = "1.0.21" criteria = "safe-to-deploy" -[[exemptions.same-file]] -version = "1.0.6" -criteria = "safe-to-run" - [[exemptions.schannel]] version = "0.1.28" criteria = "safe-to-deploy" @@ -980,10 +868,6 @@ criteria = "safe-to-deploy" version = "1.0.228" criteria = "safe-to-deploy" -[[exemptions.serde_bytes]] -version = "0.11.19" -criteria = "safe-to-deploy" - [[exemptions.serde_core]] version = "1.0.228" criteria = "safe-to-deploy" @@ -993,7 +877,7 @@ version = "1.0.228" criteria = "safe-to-deploy" [[exemptions.serde_json]] -version = "1.0.145" +version = "1.0.146" criteria = "safe-to-deploy" [[exemptions.serde_urlencoded]] @@ -1032,30 +916,14 @@ criteria = "safe-to-deploy" version = "0.6.1" criteria = "safe-to-deploy" -[[exemptions.spin]] -version = "0.10.0" -criteria = "safe-to-run" - [[exemptions.stable_deref_trait]] version = "1.2.1" criteria = "safe-to-deploy" -[[exemptions.str_stack]] -version = "0.1.0" -criteria = "safe-to-run" - [[exemptions.subtle]] version = "2.6.1" criteria = "safe-to-deploy" -[[exemptions.symbolic-common]] -version = "12.17.0" -criteria = "safe-to-run" - -[[exemptions.symbolic-demangle]] -version = "12.17.0" -criteria = "safe-to-run" - [[exemptions.syn]] version = "2.0.111" criteria = "safe-to-deploy" @@ -1116,10 +984,6 @@ criteria = "safe-to-deploy" version = "0.8.2" criteria = "safe-to-deploy" -[[exemptions.tinytemplate]] -version = "1.2.1" -criteria = "safe-to-run" - [[exemptions.tokio]] version = "1.48.0" criteria = "safe-to-deploy" @@ -1153,7 +1017,7 @@ version = "0.3.5" criteria = "safe-to-deploy" [[exemptions.tower-http]] -version = "0.6.7" +version = "0.6.8" criteria = "safe-to-deploy" [[exemptions.tower-layer]] @@ -1165,11 +1029,7 @@ version = "0.3.3" criteria = "safe-to-deploy" [[exemptions.tracing]] -version = "0.1.41" -criteria = "safe-to-deploy" - -[[exemptions.tracing-attributes]] -version = "0.1.30" +version = 
"0.1.44" criteria = "safe-to-deploy" [[exemptions.tracing-attributes]] @@ -1177,15 +1037,7 @@ version = "0.1.31" criteria = "safe-to-deploy" [[exemptions.tracing-core]] -version = "0.1.34" -criteria = "safe-to-deploy" - -[[exemptions.tracing-core]] -version = "0.1.35" -criteria = "safe-to-deploy" - -[[exemptions.tracing-futures]] -version = "0.2.5" +version = "0.1.36" criteria = "safe-to-deploy" [[exemptions.tracing-log]] @@ -1197,7 +1049,7 @@ version = "0.2.0" criteria = "safe-to-deploy" [[exemptions.tracing-subscriber]] -version = "0.3.20" +version = "0.3.22" criteria = "safe-to-deploy" [[exemptions.try-lock]] @@ -1220,6 +1072,10 @@ criteria = "safe-to-deploy" version = "0.9.0" criteria = "safe-to-deploy" +[[exemptions.unty]] +version = "0.0.4" +criteria = "safe-to-deploy" + [[exemptions.ureq]] version = "2.12.1" criteria = "safe-to-deploy" @@ -1233,7 +1089,7 @@ version = "1.0.4" criteria = "safe-to-deploy" [[exemptions.uuid]] -version = "1.18.1" +version = "1.19.0" criteria = "safe-to-deploy" [[exemptions.valuable]] @@ -1248,9 +1104,9 @@ criteria = "safe-to-deploy" version = "0.9.5" criteria = "safe-to-deploy" -[[exemptions.walkdir]] -version = "2.5.0" -criteria = "safe-to-run" +[[exemptions.virtue]] +version = "0.0.18" +criteria = "safe-to-deploy" [[exemptions.want]] version = "0.3.1" @@ -1265,27 +1121,27 @@ version = "1.0.1+wasi-0.2.4" criteria = "safe-to-deploy" [[exemptions.wasm-bindgen]] -version = "0.2.105" +version = "0.2.106" criteria = "safe-to-deploy" [[exemptions.wasm-bindgen-futures]] -version = "0.4.55" +version = "0.4.56" criteria = "safe-to-deploy" [[exemptions.wasm-bindgen-macro]] -version = "0.2.105" +version = "0.2.106" criteria = "safe-to-deploy" [[exemptions.wasm-bindgen-macro-support]] -version = "0.2.105" +version = "0.2.106" criteria = "safe-to-deploy" [[exemptions.wasm-bindgen-shared]] -version = "0.2.105" +version = "0.2.106" criteria = "safe-to-deploy" [[exemptions.web-sys]] -version = "0.3.82" +version = "0.3.83" criteria = "safe-to-deploy" [[exemptions.web-time]] @@ -1320,10 +1176,6 @@ criteria = "safe-to-deploy" version = "0.59.3" criteria = "safe-to-deploy" -[[exemptions.windows-link]] -version = "0.1.3" -criteria = "safe-to-deploy" - [[exemptions.windows-link]] version = "0.2.1" criteria = "safe-to-deploy" @@ -1437,11 +1289,11 @@ version = "0.8.1" criteria = "safe-to-deploy" [[exemptions.zerocopy]] -version = "0.8.30" +version = "0.8.31" criteria = "safe-to-deploy" [[exemptions.zerocopy-derive]] -version = "0.8.30" +version = "0.8.31" criteria = "safe-to-deploy" [[exemptions.zerofrom]] diff --git a/supply-chain/imports.lock b/supply-chain/imports.lock index 0c397a40..9c5379bb 100644 --- a/supply-chain/imports.lock +++ b/supply-chain/imports.lock @@ -1,2 +1,10 @@ # cargo-vet imports lock + +[[unpublished.apalis]] +version = "1.0.0-rc.1" +audited_as = "1.0.0-beta.2" + +[[unpublished.apalis-sql]] +version = "1.0.0-rc.1" +audited_as = "1.0.0-beta.2" From f7edc2e97732c8cdea0ea0e01a3e40ea650a9c73 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Mon, 22 Dec 2025 23:17:29 +0300 Subject: [PATCH 08/12] chore: add docs and example for workflow --- Cargo.lock | 46 +++--- apalis-core/src/lib.rs | 2 +- apalis-sql/README.md | 2 +- apalis-workflow/README.md | 91 ++++++++++-- apalis-workflow/src/dag/context.rs | 6 +- apalis-workflow/src/dag/error.rs | 2 +- apalis-workflow/src/dag/executor.rs | 71 +++------ apalis-workflow/src/dag/mod.rs | 136 ++++++++---------- apalis-workflow/src/dag/service.rs | 26 ++-- apalis-workflow/src/step.rs | 4 +- apalis/README.md | 6 +- 
.../{workflow => dag-workflow}/Cargo.toml | 7 +- examples/dag-workflow/src/main.rs | 73 ++++++++++ examples/stepped-workflow/Cargo.toml | 18 +++ .../src/main.rs | 0 15 files changed, 301 insertions(+), 189 deletions(-) rename examples/{workflow => dag-workflow}/Cargo.toml (79%) create mode 100644 examples/dag-workflow/src/main.rs create mode 100644 examples/stepped-workflow/Cargo.toml rename examples/{workflow => stepped-workflow}/src/main.rs (100%) diff --git a/Cargo.lock b/Cargo.lock index 86478b21..8a5442ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -443,6 +443,21 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" +[[package]] +name = "dag" +version = "0.1.0" +dependencies = [ + "apalis", + "apalis-file-storage", + "apalis-workflow", + "futures", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "debugid" version = "0.8.0" @@ -2454,6 +2469,20 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" +[[package]] +name = "stepped-workflow" +version = "0.1.0" +dependencies = [ + "apalis", + "apalis-file-storage", + "apalis-workflow", + "futures", + "serde", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "subtle" version = "2.6.1" @@ -3324,23 +3353,6 @@ version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" -[[package]] -name = "workflow" -version = "0.1.0" -dependencies = [ - "apalis", - "apalis-core", - "apalis-file-storage", - "apalis-workflow", - "futures", - "serde", - "serde_json", - "tokio", - "tower 0.5.2", - "tracing", - "tracing-subscriber", -] - [[package]] name = "writeable" version = "0.6.2" diff --git a/apalis-core/src/lib.rs b/apalis-core/src/lib.rs index 19e5cea1..5ecf952f 100644 --- a/apalis-core/src/lib.rs +++ b/apalis-core/src/lib.rs @@ -302,7 +302,7 @@ //! //! # Observability //! You can track tasks using [apalis-board](https://github.com/apalis-dev/apalis-board). -//! ![Task](https://github.com/apalis-dev/apalis-board/raw/master/screenshots/task.png) +//! ![Task](https://github.com/apalis-dev/apalis-board/raw/main/screenshots/task.png) //! //! [`Backend`]: crate::backend::Backend //! [`TaskFn`]: crate::task_fn::TaskFn diff --git a/apalis-sql/README.md b/apalis-sql/README.md index 3ed080c5..6c657f15 100644 --- a/apalis-sql/README.md +++ b/apalis-sql/README.md @@ -26,7 +26,7 @@ apalis-sqlite = "1" ## Observability You can track your jobs using [apalis-board](https://github.com/apalis-dev/apalis-board). -![Task](https://github.com/apalis-dev/apalis-board/raw/master/screenshots/task.png) +![Task](https://github.com/apalis-dev/apalis-board/raw/main/screenshots/task.png) ## Licence diff --git a/apalis-workflow/README.md b/apalis-workflow/README.md index e53953d4..e09c57e7 100644 --- a/apalis-workflow/README.md +++ b/apalis-workflow/README.md @@ -4,21 +4,23 @@ This crate provides a flexible and composable workflow engine for [apalis](https ## Overview -The workflow engine allows you to define a sequence of steps in a workflow. -Workflows are built by composing steps, and can be executed using supported backends +The workflow engine allows you to define a sequence or DAG chain of steps in a workflow. 
+Workflows are built by composing steps/nodes, and can be executed using supported backends.
 
 ## Features
 
-- Compose workflows from reusable steps.
-- Durable and resumable workflows.
-- Steps are processed in a distributed manner.
-- Parallel execution of steps.
-- Extensible via the `Step` trait.
-- Integration with `apalis` backends and workers
-- Compile-time guarantees for workflows.
+- Extensible, durable and resumable workflows.
+- Workflows are processed in a distributed manner.
+- Parallel and concurrent execution of single steps.
+- Full integration with `apalis` backends, workers and middleware.
+- Macro free with compile-time guarantees.
 
 ## Example
 
+Currently `apalis-workflow` supports sequential and directed acyclic graph (DAG) based workflows.
+
+### Sequential Workflow
+
 ```rust,ignore
 use apalis::prelude::*;
 use apalis_workflow::*;
@@ -48,10 +50,73 @@ async fn main() {
 }
 ```
 
+### Directed Acyclic Graph
+
+```rust,ignore
+use apalis::prelude::*;
+use apalis_file_storage::JsonStorage;
+use apalis_workflow::{DagFlow, WorkflowSink};
+use serde_json::Value;
+
+async fn get_name(user_id: u32) -> Result<String, BoxDynError> {
+    Ok(user_id.to_string())
+}
+
+async fn get_age(user_id: u32) -> Result<usize, BoxDynError> {
+    Ok(user_id as usize + 20)
+}
+
+async fn get_address(user_id: u32) -> Result<usize, BoxDynError> {
+    Ok(user_id as usize + 100)
+}
+
+async fn collector(
+    (name, age, address): (String, usize, usize),
+    wrk: WorkerContext,
+) -> Result<usize, BoxDynError> {
+    let result = name.parse::<usize>()? + age + address;
+    wrk.stop().unwrap();
+    Ok(result)
+}
+
+
+#[tokio::main]
+async fn main() -> Result<(), BoxDynError> {
+    let mut backend = JsonStorage::new_temp().unwrap();
+
+    backend
+        .push_start(Value::from(vec![42, 43, 44]))
+        .await
+        .unwrap();
+
+    let dag_flow = DagFlow::new();
+    let get_name = dag_flow.node(get_name);
+    let get_age = dag_flow.node(get_age);
+    let get_address = dag_flow.node(get_address);
+    dag_flow
+        .node(collector)
+        .depends_on((&get_name, &get_age, &get_address)); // Order and types matter here
+
+    dag_flow.validate()?; // Ensure the DAG is valid
+
+    info!("Executing workflow:\n{}", dag_flow); // Print the DAG structure in DOT format
+
+    WorkerBuilder::new("tasty-banana")
+        .backend(backend)
+        .enable_tracing()
+        .on_event(|_c, e| info!("{e}"))
+        .build(dag_flow)
+        .run()
+        .await?;
+    Ok(())
+}
+
+```
+
 ## Observability
 
 You can track your workflows using [apalis-board](https://github.com/apalis-dev/apalis-board).
-![Task](https://github.com/apalis-dev/apalis-board/raw/master/screenshots/task.png) +![Task](https://github.com/apalis-dev/apalis-board/raw/main/screenshots/task.png) ## Backend Support @@ -59,7 +124,7 @@ You can track your workflows using [apalis-board](https://github.com/apalis-dev/ - [x] [SqliteStorage](https://docs.rs/apalis-sqlite#workflow-example) - [x] [RedisStorage](https://docs.rs/apalis-redis#workflow-example) - [x] [PostgresStorage](https://docs.rs/apalis-postgres#workflow-example) -- [ ] MysqlStorage +- [x] [MysqlStorage](https://docs.rs/apalis-mysql#workflow-example) - [ ] RsMq ## Roadmap @@ -70,12 +135,12 @@ You can track your workflows using [apalis-board](https://github.com/apalis-dev/ - [x] Fold - [-] Repeater - [-] Subflow -- [-] DAG +- [x] DAG ## Inspirations: - [Underway](https://github.com/maxcountryman/underway): Postgres-only `stepped` solution -- [dagx](https://github.com/swaits/dagx): blazing fast in-memory `dag` solution +- [dagx](https://github.com/swaits/dagx): blazing fast *in-memory* `dag` solution ## License diff --git a/apalis-workflow/src/dag/context.rs b/apalis-workflow/src/dag/context.rs index 718f2b98..8a9cf464 100644 --- a/apalis-workflow/src/dag/context.rs +++ b/apalis-workflow/src/dag/context.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; /// Metadata stored in each task for workflow processing #[derive(Debug, Deserialize, Serialize, Default)] -pub struct DagflowContext { +pub struct DagFlowContext { /// Previous node executed in the DAG /// This is the source node that led to the current node's execution pub prev_node: Option, @@ -29,7 +29,7 @@ pub struct DagflowContext { pub root_task_id: Option>, } -impl Clone for DagflowContext { +impl Clone for DagFlowContext { fn clone(&self) -> Self { Self { prev_node: self.prev_node, @@ -43,7 +43,7 @@ impl Clone for DagflowContext { } } -impl DagflowContext { +impl DagFlowContext { /// Create initial context for DAG execution pub fn new(root_task_id: Option>) -> Self { Self { diff --git a/apalis-workflow/src/dag/error.rs b/apalis-workflow/src/dag/error.rs index 03815e11..34b0e2f4 100644 --- a/apalis-workflow/src/dag/error.rs +++ b/apalis-workflow/src/dag/error.rs @@ -5,7 +5,7 @@ use thiserror::Error; /// Errors that can occur during DAG workflow execution. #[derive(Error, Debug)] -pub enum DagflowError { +pub enum DagFlowError { /// An error originating from the actual node execution. 
#[error("Node execution error: {0}")] Node(#[source] BoxDynError), diff --git a/apalis-workflow/src/dag/executor.rs b/apalis-workflow/src/dag/executor.rs index 4419b635..f71d7832 100644 --- a/apalis-workflow/src/dag/executor.rs +++ b/apalis-workflow/src/dag/executor.rs @@ -25,8 +25,8 @@ use petgraph::{ use tower::Service; use crate::{ - DagService, - dag::{DagflowContext, RootDagService, error::DagflowError, response::DagExecutionResponse}, + DagFlow, DagService, + dag::{DagFlowContext, RootDagService, error::DagFlowError, response::DagExecutionResponse}, id_generator::GenerateId, }; @@ -65,7 +65,7 @@ where B: BackendExt, { /// Get a node by name - pub fn get_node_by_name_mut( + fn get_node_by_name_mut( &mut self, name: &str, ) -> Option<&mut DagService> { @@ -73,42 +73,19 @@ where .get(name) .and_then(|&idx| self.graph.node_weight_mut(idx)) } - - /// Export the DAG to DOT format - #[must_use] - pub fn to_dot(&self) -> String { - let names = self - .node_mapping - .iter() - .map(|(name, &idx)| (idx, name.clone())) - .collect::>(); - let get_node_attributes = |_, (index, _)| { - format!( - "label=\"{}\"", - names.get(&index).cloned().unwrap_or_default() - ) - }; - let dot = petgraph::dot::Dot::with_attr_getters( - &self.graph, - &[Config::NodeNoLabel, Config::EdgeNoLabel], - &|_, _| String::new(), - &get_node_attributes, - ); - format!("{dot:?}") - } } impl Service> for DagExecutor where B: BackendExt, B::Context: - Send + Sync + 'static + MetadataExt, Error = MetaError> + Default, + Send + Sync + 'static + MetadataExt, Error = MetaError> + Default, B::IdType: Clone + Send + Sync + 'static + GenerateId + Debug, B::Compact: Send + Sync + 'static, MetaError: Into, { type Response = B::Compact; - type Error = DagflowError; + type Error = DagFlowError; type Future = Pin> + Send>>; fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { @@ -123,7 +100,7 @@ where .node_weight_mut(self.not_ready[0]) .unwrap() .poll_ready(cx) - .map_err(|e| DagflowError::Service(e))? + .map_err(|e| DagFlowError::Service(e))? .is_pending() { return Poll::Pending; @@ -139,54 +116,38 @@ where Box::pin(async move { let context = req - .extract::>>() + .extract::>>() .await - .map_err(|e| DagflowError::Metadata(e.into()))? + .map_err(|e| DagFlowError::Metadata(e.into()))? 
.0; // Get the service for this node let service = graph .node_weight_mut(context.current_node) - .ok_or_else(|| DagflowError::MissingService(context.current_node))?; + .ok_or_else(|| DagFlowError::MissingService(context.current_node))?; - let result = service.call(req).await.map_err(|e| DagflowError::Node(e))?; + let result = service.call(req).await.map_err(|e| DagFlowError::Node(e))?; Ok(result) }) } } -impl - IntoWorkerService, B::Compact, B::Context> for DagExecutor +impl IntoWorkerService, B::Compact, B::Context> for DagFlow where - B: BackendExt - + Send - + Sync - + 'static - + Sink, Error = Err> - + Unpin - + Clone - + WaitForCompletion, + B: BackendExt + Clone, Err: std::error::Error + Send + Sync + 'static, - B::Context: MetadataExt, Error = MetaError> + Send + Sync + 'static, + B::Context: MetadataExt> + Send + Sync + 'static, B::IdType: Send + Sync + 'static + Default + GenerateId + PartialEq + Debug, - B: Sync + Backend, B::Compact: Send + Sync + 'static + Clone, - >>::Error: Into, - B::Codec: Codec, Compact = Compact, Error = CdcErr> + 'static, - CdcErr: Into, - ::Codec: Codec< - DagExecutionResponse::IdType>, - Compact = Compact, - Error = CdcErr, - >, - MetaError: Send + Sync + 'static + Into, + RootDagService: Service>, { type Backend = RawDataBackend; fn into_service(self, b: B) -> WorkerService, RootDagService> { + let executor = self.build().expect("Execution should be valid"); WorkerService { backend: RawDataBackend::new(b.clone()), - service: RootDagService::new(self, b), + service: RootDagService::new(executor, b), } } } diff --git a/apalis-workflow/src/dag/mod.rs b/apalis-workflow/src/dag/mod.rs index eb31e974..945119eb 100644 --- a/apalis-workflow/src/dag/mod.rs +++ b/apalis-workflow/src/dag/mod.rs @@ -1,6 +1,6 @@ use std::{ collections::{HashMap, VecDeque}, - fmt::Debug, + fmt::{self, Debug}, marker::PhantomData, sync::Mutex, }; @@ -43,11 +43,11 @@ use tower::{Service, ServiceBuilder, util::BoxCloneSyncService}; use crate::{ BoxedService, DagService, - dag::{error::DagflowError, executor::DagExecutor, node::NodeService}, + dag::{error::DagFlowError, executor::DagExecutor, node::NodeService}, id_generator::GenerateId, }; -pub use context::DagflowContext; +pub use context::DagFlowContext; pub use service::RootDagService; /// Directed Acyclic Graph (DAG) workflow builder @@ -69,6 +69,15 @@ where } } +impl fmt::Display for DagFlow +where + B: BackendExt, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.to_dot()) + } +} + impl DagFlow where B: BackendExt, @@ -167,14 +176,50 @@ where ) } + /// Validate the DAG for cycles + pub fn validate(&self) -> Result<(), DagFlowError> { + // Validate DAG (check for cycles) + toposort( + &*self.graph.lock().expect("Failed to lock graph mutex"), + None, + ) + .map_err(DagFlowError::CyclicDAG)?; + Ok(()) + } + + /// Export the DAG to DOT format + pub fn to_dot(&self) -> String { + let names = self + .node_mapping + .lock() + .expect("could not lock nodes") + .iter() + .map(|(name, &idx)| (idx, name.clone())) + .collect::>(); + let get_node_attributes = |_, (index, _)| { + format!( + "label=\"{}\"", + names.get(&index).cloned().unwrap_or_default() + ) + }; + let graph = self.graph.lock().expect("could not lock graph"); + let dot = petgraph::dot::Dot::with_attr_getters( + &*graph, + &[Config::NodeNoLabel, Config::EdgeNoLabel], + &|_, _| String::new(), + &get_node_attributes, + ); + format!("{dot:?}") + } + /// Build the DAG executor - pub fn build(self) -> Result, DagflowError> { + pub(crate) fn 
build(self) -> Result, DagFlowError> { // Validate DAG (check for cycles) let sorted = toposort( &*self.graph.lock().expect("Failed to lock graph mutex"), None, ) - .map_err(DagflowError::CyclicDAG)?; + .map_err(DagFlowError::CyclicDAG)?; fn find_edge_nodes(graph: &DiGraph, direction: Direction) -> Vec { graph @@ -406,12 +451,10 @@ mod tests { }), ) .depends_on(&middle); - let dag_executor = dag.build().unwrap(); - assert_eq!(dag_executor.topological_order.len(), 3); - println!("DAG in DOT format:\n{}", dag_executor.to_dot()); + println!("DAG in DOT format:\n{}", dag.to_dot()); - let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); + let mut backend = JsonStorage::new_temp().unwrap(); backend.push_start(Value::from(42)).await.unwrap(); @@ -423,9 +466,7 @@ mod tests { ctx.stop().unwrap(); } }) - .build::>, RootDagService>>( - dag_executor, - ); + .build(dag); worker.run().await.unwrap(); } @@ -454,10 +495,7 @@ mod tests { ) .depends_on((&plus_one, &multiply, &squared)); - let dag_executor = dag.build().unwrap(); - assert_eq!(dag_executor.topological_order.len(), 5); - - println!("DAG in DOT format:\n{}", dag_executor.to_dot()); + println!("DAG in DOT format:\n{}", dag.to_dot()); let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); @@ -471,9 +509,7 @@ mod tests { ctx.stop().unwrap(); } }) - .build::>, RootDagService>>( - dag_executor, - ); + .build(dag); worker.run().await.unwrap(); } @@ -517,10 +553,8 @@ mod tests { }), ) .depends_on((&main_collector, &side_collector)); - let dag_executor = dag.build().unwrap(); - assert_eq!(dag_executor.topological_order.len(), 6); - println!("DAG in DOT format:\n{}", dag_executor.to_dot()); + println!("DAG in DOT format:\n{}", dag.to_dot()); let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); @@ -537,9 +571,7 @@ mod tests { ctx.stop().unwrap(); } }) - .build::>, RootDagService>>( - dag_executor, - ); + .build(dag); worker.run().await.unwrap(); } @@ -609,20 +641,9 @@ mod tests { dag.route(check_approval).depends_on(&on_collect); - let dag_executor = dag.build().unwrap(); - assert_eq!(dag_executor.topological_order.len(), 7); + println!("DAG in DOT format:\n{}", dag.to_dot()); - println!("Start nodes: {:?}", dag_executor.start_nodes); - println!("End nodes: {:?}", dag_executor.end_nodes); - - println!( - "DAG Topological Order: {:?}", - dag_executor.topological_order - ); - - println!("DAG in DOT format:\n{}", dag_executor.to_dot()); - - let mut backend: JsonStorage = JsonStorage::new_temp().unwrap(); + let mut backend = JsonStorage::new_temp().unwrap(); backend .push_start(Value::from(vec![17, 18, 19])) @@ -637,44 +658,7 @@ mod tests { ctx.stop().unwrap(); } }) - .build::>, RootDagService>>( - dag_executor, - ); + .build(dag); worker.run().await.unwrap(); - - // let inner_basic: Workflow, _> = Workflow::new("basic") - // .and_then(async |input: u32| (input + 1) as usize) - // .and_then(async |input: usize| input.to_string()) - // .and_then(async |input: String| input.parse::()); - - // let workflow = Workflow::new("example_workflow") - // .and_then(async |input: u32| Ok::, BoxDynError>(input..100)) - // .filter_map( - // async |input: u32| { - // if input > 50 { Some(input) } else { None } - // }, - // ) - // .and_then(async |items: Vec| Ok::<_, BoxDynError>(items)) - // .fold(0, async |(acc, item): (u32, u32)| { - // Ok::<_, BoxDynError>(item + acc) - // }) - // // .delay_for(Duration::from_secs(2)) - // // .delay_with(|_| Duration::from_secs(1)) - // // .and_then(async |items: Range| Ok::<_, 
BoxDynError>(items.sum::())) - // // .repeat_until(async |i: u32| { - // // if i < 20 { - // // Ok::<_, BoxDynError>(Some(i)) - // // } else { - // // Ok(None) - // // } - // // }) - // // .chain(inner_basic) - // // .chain( - // // Workflow::new("sub_workflow") - // // .and_then(async |input: usize| Ok::<_, BoxDynError>(input as u32 * 2)) - // // .and_then(async |input: u32| Ok::<_, BoxDynError>(input + 10)), - // // ) - // // .chain(dag_executor) - // .build(); } } diff --git a/apalis-workflow/src/dag/service.rs b/apalis-workflow/src/dag/service.rs index 8542dd27..c999c958 100644 --- a/apalis-workflow/src/dag/service.rs +++ b/apalis-workflow/src/dag/service.rs @@ -22,7 +22,7 @@ use std::pin::Pin; use std::task::{Context, Poll}; use tower::Service; -use crate::dag::context::DagflowContext; +use crate::dag::context::DagFlowContext; use crate::dag::response::DagExecutionResponse; use crate::id_generator::GenerateId; use crate::{DagExecutor, DagService}; @@ -76,7 +76,7 @@ where B::IdType: GenerateId + Send + Sync + 'static + PartialEq + Debug, B::Compact: Send + Sync + 'static + Clone, B::Context: - Send + Sync + Default + MetadataExt, Error = MetaError> + 'static, + Send + Sync + Default + MetadataExt, Error = MetaError> + 'static, Err: std::error::Error + Send + Sync + 'static, B: Sink, Error = Err> + Unpin, B::Codec: Codec, Compact = B::Compact, Error = CdcErr> @@ -103,14 +103,14 @@ where let end_nodes = executor.end_nodes.clone(); async move { let ctx = req - .extract::>>() + .extract::>>() .await; let (response, context) = match ctx { Ok(Meta(mut context)) => { #[cfg(feature = "tracing")] tracing::debug!( node = ?context.current_node, - "Extracted DagflowContext for task" + "Extracted DagFlowContext for task" ); let incoming_nodes = executor .graph @@ -227,14 +227,14 @@ where Err(e) => { #[cfg(feature = "tracing")] tracing::debug!( - "Extracting DagflowContext for task without meta" + "Extracting DagFlowContext for task without meta" ); // if no metadata, we assume its an entry task match start_nodes.len() { 1 => { #[cfg(feature = "tracing")] tracing::debug!("Single start node detected, proceeding with execution"); - let context = DagflowContext::new(req.parts.task_id.clone()); + let context = DagFlowContext::new(req.parts.task_id.clone()); let task_id = req.parts.task_id.clone(); req.parts .ctx @@ -249,7 +249,7 @@ where let new_node_task_ids = fan_out_entry_nodes( &executor, &mut backend, - &mut DagflowContext::new(req.parts.task_id.clone()), + &mut DagFlowContext::new(req.parts.task_id.clone()), &req.args, ) .await?; @@ -318,13 +318,13 @@ async fn fan_out_next_nodes( executor: &DagExecutor, outgoing_nodes: Vec, backend: &mut B, - context: &mut DagflowContext, + context: &mut DagFlowContext, input: &B::Compact, ) -> Result>, BoxDynError> where B::IdType: GenerateId + Send + Sync + 'static + PartialEq + Debug, B::Compact: Send + Sync + 'static + Clone, - B::Context: Send + Sync + Default + MetadataExt> + 'static, + B::Context: Send + Sync + Default + MetadataExt> + 'static, B: Sink, Error = Err> + Unpin, Err: std::error::Error + Send + Sync + 'static, B: BackendExt + Send + Sync + 'static + Clone + WaitForCompletion, @@ -345,7 +345,7 @@ where .clone(); let task = TaskBuilder::new(input.clone()) .with_task_id(task_id.clone()) - .meta(DagflowContext { + .meta(DagFlowContext { prev_node: context.prev_node.clone(), current_node: outgoing_node, completed_nodes: context.completed_nodes.clone(), @@ -371,13 +371,13 @@ where async fn fan_out_entry_nodes( executor: &DagExecutor, backend: &mut B, 
- context: &mut DagflowContext, + context: &mut DagFlowContext, input: &B::Compact, ) -> Result>, BoxDynError> where B::IdType: GenerateId + Send + Sync + 'static + PartialEq + Debug, B::Compact: Send + Sync + 'static + Clone, - B::Context: Send + Sync + Default + MetadataExt> + 'static, + B::Context: Send + Sync + Default + MetadataExt> + 'static, B: Sink, Error = Err> + Unpin, Err: std::error::Error + Send + Sync + 'static, B: BackendExt + Send + Sync + 'static + Clone + WaitForCompletion, @@ -407,7 +407,7 @@ where .clone(); let task = TaskBuilder::new(input) .with_task_id(task_id.clone()) - .meta(DagflowContext { + .meta(DagFlowContext { prev_node: None, current_node: outgoing_node, completed_nodes: Default::default(), diff --git a/apalis-workflow/src/step.rs b/apalis-workflow/src/step.rs index 2b5fbc5b..2f67e13b 100644 --- a/apalis-workflow/src/step.rs +++ b/apalis-workflow/src/step.rs @@ -10,9 +10,9 @@ pub trait Layer { fn layer(&self, step: S) -> Self::Step; } -/// A workflow step +/// A sequential step /// -/// A single unit of work in a workflow pipeline. +/// A single unit of work in a sequential workflow pipeline. pub trait Step where B: BackendExt, diff --git a/apalis/README.md b/apalis/README.md index 3cb6fe98..0f487ebf 100644 --- a/apalis/README.md +++ b/apalis/README.md @@ -56,7 +56,7 @@ | `apalis-core` | | | | `apalis-workflow` | | | | `apalis-board` | | | -| `apalis-board-api` | | | +| `apalis-board-api` | | | ## Getting Started @@ -165,9 +165,9 @@ sequenceDiagram ## Observability -With the [web UI](https://github.com/apalis-dev/apalis-board), you can manage your jobs through a simple interface. Check out this [working example](https://github.com/apalis-dev/apalis-board/blob/master/examples/axum-email-service) to see how it works. +With the [web UI](https://github.com/apalis-dev/apalis-board), you can manage your jobs through a simple interface. Check out this [working example](https://github.com/apalis-dev/apalis-board/blob/main/examples/axum-email-service) to see how it works. 
-![Workers Screenshot](https://github.com/apalis-dev/apalis-board/raw/master/screenshots/workers.png) +![Workers Screenshot](https://github.com/apalis-dev/apalis-board/raw/main/screenshots/workers.png) ## Integrations diff --git a/examples/workflow/Cargo.toml b/examples/dag-workflow/Cargo.toml similarity index 79% rename from examples/workflow/Cargo.toml rename to examples/dag-workflow/Cargo.toml index ba4dc320..9738698b 100644 --- a/examples/workflow/Cargo.toml +++ b/examples/dag-workflow/Cargo.toml @@ -1,20 +1,19 @@ [package] -name = "workflow" +name = "dag" version = "0.1.0" +rust-version.workspace = true edition.workspace = true repository.workspace = true [dependencies] -tower = { version = "0.5", features = ["util"] } tokio = { version = "1", features = ["full"] } apalis = { path = "../../apalis", features = ["limit", "catch-panic", "retry"] } apalis-workflow = { path = "../../apalis-workflow" } -apalis-core = { path = "../../apalis-core", features = ["serde"] } apalis-file-storage = { path = "../../utils/apalis-file-storage" } serde = { version = "1", features = ["derive"] } -serde_json = "1" tracing-subscriber = "0.3.20" futures = "0.3" +serde_json = "1" [dependencies.tracing] default-features = false diff --git a/examples/dag-workflow/src/main.rs b/examples/dag-workflow/src/main.rs new file mode 100644 index 00000000..ae35ee15 --- /dev/null +++ b/examples/dag-workflow/src/main.rs @@ -0,0 +1,73 @@ +use apalis::prelude::*; +use apalis_file_storage::JsonStorage; +use apalis_workflow::{DagFlow, WorkflowSink}; +use serde_json::Value; +use tracing::info; + +async fn get_name(user_id: u32) -> Result { + Ok(user_id.to_string()) +} + +async fn get_age(user_id: u32) -> Result { + Ok(user_id as usize + 20) +} + +async fn get_address(user_id: u32) -> Result { + Ok(user_id as usize + 100) +} + +async fn collector( + (name, age, address): (String, usize, usize), + wrk: WorkerContext, // Nodes are still apalis services and can inject deps +) -> Result { + let result = name.parse::()? 
+ age + address; + wrk.stop().unwrap(); + Ok(result) +} + +#[tokio::main] +async fn main() -> Result<(), BoxDynError> { + unsafe { + std::env::set_var("RUST_LOG", "debug"); + }; + tracing_subscriber::fmt::init(); + let mut backend = JsonStorage::new_temp().unwrap(); + backend + .push_start(Value::from(vec![42, 43, 44])) + .await + .unwrap(); + + let dag_flow = DagFlow::new(); + let get_name = dag_flow.node(get_name); + let get_age = dag_flow.node(get_age); + let get_address = dag_flow.node(get_address); + dag_flow + .node(collector) + .depends_on((&get_name, &get_age, &get_address)); // Order and types matters here + + dag_flow.validate()?; // Ensure DAG is valid + + // This should print something like: + // digraph { + // 0 [ label="dag::get_name"] + // 1 [ label="dag::get_age"] + // 2 [ label="dag::get_address"] + // 3 [ label="dag::collector"] + // 0 -> 3 [ ] + // 1 -> 3 [ ] + // 2 -> 3 [ ] + // } + // + // You can visualize this using tools like Graphviz + // https://dreampuf.github.io/GraphvizOnline/ + info!("Executing workflow:\n{}", dag_flow); // Print the DAG structure in dot format + + WorkerBuilder::new("tasty-banana") + .backend(backend) + .enable_tracing() + .on_event(|_c, e| info!("{e}")) + .build(dag_flow) + .run() + .await?; + Ok(()) +} diff --git a/examples/stepped-workflow/Cargo.toml b/examples/stepped-workflow/Cargo.toml new file mode 100644 index 00000000..b13e4684 --- /dev/null +++ b/examples/stepped-workflow/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "stepped-workflow" +version = "0.1.0" +edition.workspace = true +repository.workspace = true + +[dependencies] +tokio = { version = "1", features = ["full"] } +apalis = { path = "../../apalis", features = ["limit", "catch-panic", "retry"] } +apalis-workflow = { path = "../../apalis-workflow" } +apalis-file-storage = { path = "../../utils/apalis-file-storage" } +serde = { version = "1", features = ["derive"] } +tracing-subscriber = "0.3.20" +futures = "0.3" + +[dependencies.tracing] +default-features = false +version = "0.1" diff --git a/examples/workflow/src/main.rs b/examples/stepped-workflow/src/main.rs similarity index 100% rename from examples/workflow/src/main.rs rename to examples/stepped-workflow/src/main.rs From e887985f9b40d035f0081124bc9bc42a27f59c09 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Mon, 22 Dec 2025 23:26:42 +0300 Subject: [PATCH 09/12] chore: update and audit --- Cargo.lock | 4 ++-- supply-chain/config.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8a5442ae..98888ec4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1950,9 +1950,9 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58" [[package]] name = "reqwest" -version = "0.12.27" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e893f6bece5953520ddbb3f8f46f3ef36dd1fef4ee9b087c4b4a725fd5d10e4" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64", "bytes", diff --git a/supply-chain/config.toml b/supply-chain/config.toml index 49d7ee4f..902f1037 100644 --- a/supply-chain/config.toml +++ b/supply-chain/config.toml @@ -745,7 +745,7 @@ version = "0.8.8" criteria = "safe-to-deploy" [[exemptions.reqwest]] -version = "0.12.27" +version = "0.12.28" criteria = "safe-to-deploy" [[exemptions.ring]] From eba9b529f822741b8f129f3e006bcb6dfd7ffaf8 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Tue, 23 Dec 2025 09:30:56 +0300 Subject: [PATCH 10/12] chore: refactor and include 
repeater --- apalis-core/src/backend/polling.rs | 424 ------------------ apalis-core/src/task/builder.rs | 4 +- apalis-core/src/task/metadata.rs | 9 + apalis-sql/src/context.rs | 34 +- apalis-sql/src/ext.rs | 24 + apalis-sql/src/from_row.rs | 8 +- apalis-sql/src/lib.rs | 3 + apalis-workflow/README.md | 6 +- .../src/{chain => composite}/mod.rs | 6 +- apalis-workflow/src/dag/executor.rs | 15 +- apalis-workflow/src/dag/mod.rs | 68 ++- apalis-workflow/src/dag/response.rs | 2 + apalis-workflow/src/dag/service.rs | 37 +- apalis-workflow/src/lib.rs | 48 +- apalis-workflow/src/repeat_until/mod.rs | 14 - .../src/{ => sequential}/and_then/mod.rs | 10 +- .../src/{ => sequential}/context.rs | 0 .../src/{ => sequential}/delay/mod.rs | 6 +- .../src/{ => sequential}/filter_map/mod.rs | 12 +- .../src/{ => sequential}/fold/mod.rs | 8 +- apalis-workflow/src/sequential/mod.rs | 31 ++ .../src/sequential/repeat_until/mod.rs | 285 ++++++++++++ .../src/{ => sequential}/router.rs | 0 .../src/{ => sequential}/service.rs | 4 +- apalis-workflow/src/{ => sequential}/step.rs | 2 +- .../src/{ => sequential}/workflow.rs | 13 +- apalis-workflow/src/sink.rs | 72 ++- examples/dag-workflow/src/main.rs | 2 +- 28 files changed, 565 insertions(+), 582 deletions(-) delete mode 100644 apalis-core/src/backend/polling.rs create mode 100644 apalis-sql/src/ext.rs rename apalis-workflow/src/{chain => composite}/mod.rs (65%) delete mode 100644 apalis-workflow/src/repeat_until/mod.rs rename apalis-workflow/src/{ => sequential}/and_then/mod.rs (96%) rename apalis-workflow/src/{ => sequential}/context.rs (100%) rename apalis-workflow/src/{ => sequential}/delay/mod.rs (97%) rename apalis-workflow/src/{ => sequential}/filter_map/mod.rs (97%) rename apalis-workflow/src/{ => sequential}/fold/mod.rs (98%) create mode 100644 apalis-workflow/src/sequential/mod.rs create mode 100644 apalis-workflow/src/sequential/repeat_until/mod.rs rename apalis-workflow/src/{ => sequential}/router.rs (100%) rename apalis-workflow/src/{ => sequential}/service.rs (98%) rename apalis-workflow/src/{ => sequential}/step.rs (96%) rename apalis-workflow/src/{ => sequential}/workflow.rs (90%) diff --git a/apalis-core/src/backend/polling.rs b/apalis-core/src/backend/polling.rs deleted file mode 100644 index 1da635bb..00000000 --- a/apalis-core/src/backend/polling.rs +++ /dev/null @@ -1,424 +0,0 @@ -use std::sync::Arc; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::task::{Context, Poll}; -use std::time::{Duration, Instant}; -use std::{pin::Pin, sync::atomic::AtomicU64}; - -use futures_core::Stream; -use futures_core::stream::BoxStream; - -use futures_util::future::poll_fn; -use futures_util::lock::Mutex; -use futures_util::{FutureExt, StreamExt, stream}; - -use crate::worker::context::WorkerContext; - -/// A trait for different polling strategies -/// All strategies can be combined in a race condition -pub trait PollStrategy { - /// The stream returned by the strategy - type Stream: Stream + Send; - - /// Create a stream that completes when the next poll should occur - fn poll_ready( - self: Box, - ctx: &WorkerContext, - prev_count: &Arc, - ) -> Self::Stream; -} - -type BoxedPollStrategy = - Box + Send>>> + Send + Sync + 'static>; - -struct WrapperStrategy -where - S: PollStrategy + Send + 'static, -{ - strategy: S, -} - -impl PollStrategy for WrapperStrategy -where - S: PollStrategy + Send + 'static, -{ - type Stream = Pin + Send>>; - - fn poll_ready( - self: Box, - ctx: &WorkerContext, - prev_count: &Arc, - ) -> Self::Stream { - 
Box::new(self.strategy).poll_ready(ctx, prev_count).boxed() - } -} - -pub struct StrategyBuilder { - strategies: Vec, -} - -impl StrategyBuilder { - pub fn new() -> Self { - Self { - strategies: Vec::new(), - } - } - - pub fn apply(mut self, strategy: S) -> Self - where - S: PollStrategy + 'static + Sync + Send, - Stm: Stream + Send + 'static, - { - self.strategies.push(Box::new(WrapperStrategy { strategy })); - self - } - - pub fn build(self) -> Strategy { - Strategy { - strategies: Arc::new(std::sync::Mutex::new( - self.strategies - .into_iter() - .map(|s| Some(s)) - .collect::>(), - )), - } - } -} - -#[derive(Clone)] -pub struct Strategy { - strategies: Arc>>>, -} - -impl Strategy { - pub fn build( - self, - ctx: &WorkerContext, - prev_count: &Arc, - ) -> Pin + Send>> { - let this = Box::new(self); - this.poll_ready(ctx, prev_count) - } -} - -impl PollStrategy for Strategy { - type Stream = Pin + Send>>; - - fn poll_ready( - mut self: Box, - ctx: &WorkerContext, - prev_count: &Arc, - ) -> Self::Stream { - let ctx = ctx.clone(); - let prev_count = prev_count.clone(); - let ctx = ctx.clone(); - let mut streams = self - .strategies - .lock() - .unwrap() - .drain(..) - .enumerate() - .map(move |(i, mut a)| { - let ctx = ctx.clone(); - let prev_count = prev_count.clone(); - let lock = a.take().expect("Strategy already taken"); - lock.poll_ready(&ctx, &prev_count) - }) - .collect::>(); - // Reverse to give priority to strategies in the order they were added - streams.reverse(); - RaceNext::new(streams).map(|(_idx, _)| ()).boxed() - } -} - -/// A stream that polls multiple streams, always returning the first ready item, -/// and skipping one item from all other streams each round. -pub struct RaceNext { - streams: Vec + Send>>>>, - pending_skips: Vec, -} - -impl RaceNext { - pub fn new(streams: Vec + Send>>>) -> Self { - let len = streams.len(); - Self { - streams: streams.into_iter().map(Some).collect(), - pending_skips: vec![false; len], - } - } -} - -impl Stream for RaceNext { - type Item = (usize, T); - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.as_mut().get_mut(); - - // First, handle any pending skips from the previous round - for i in 0..this.streams.len() { - if this.pending_skips[i] { - if let Some(ref mut stream) = this.streams[i] { - match stream.as_mut().poll_next(cx) { - Poll::Ready(Some(_)) => { - // Successfully skipped an item - this.pending_skips[i] = false; - } - Poll::Ready(None) => { - // Stream ended while trying to skip - this.streams[i] = None; - this.pending_skips[i] = false; - } - Poll::Pending => { - // Still waiting to skip, continue to next stream - continue; - } - } - } - } - } - - // Now poll for the next ready item - let mut any_pending = false; - for i in 0..this.streams.len() { - // Skip streams that are still pending a skip operation - if this.pending_skips[i] { - any_pending = true; - continue; - } - - if let Some(ref mut stream) = this.streams[i] { - match stream.as_mut().poll_next(cx) { - Poll::Ready(Some(item)) => { - // Found a ready item! 
Mark other streams for skipping - for j in 0..this.streams.len() { - if j != i && this.streams[j].is_some() { - this.pending_skips[j] = true; - } - } - return Poll::Ready(Some((i, item))); - } - Poll::Ready(None) => { - // This stream ended, remove it - this.streams[i] = None; - } - Poll::Pending => { - any_pending = true; - } - } - } - } - - // Check if all streams are exhausted - if this.streams.iter().all(|s| s.is_none()) { - return Poll::Ready(None); - } - - if any_pending { - Poll::Pending - } else { - // All remaining streams are exhausted - Poll::Ready(None) - } - } -} - -impl RaceNext { - /// Returns the number of active streams remaining - pub fn active_count(&self) -> usize { - self.streams.iter().filter(|s| s.is_some()).count() - } - - /// Checks if any streams are still active - pub fn has_active_streams(&self) -> bool { - self.streams.iter().any(|s| s.is_some()) - } -} - -// Simple PRNG state for jitter (thread-safe) -static JITTER_STATE: AtomicU64 = AtomicU64::new(1); - -/// Backoff configuration for strategies -#[derive(Debug, Clone)] -pub struct BackoffConfig { - pub max_delay: Duration, - pub multiplier: f64, - pub jitter_factor: f64, // 0.0 to 1.0 -} - -impl Default for BackoffConfig { - fn default() -> Self { - Self { - max_delay: Duration::from_secs(60), - multiplier: 2.0, - jitter_factor: 0.1, - } - } -} - -impl BackoffConfig { - pub fn new(max: Duration) -> Self { - Self { - max_delay: max, - ..Default::default() - } - } - - pub fn with_multiplier(mut self, multiplier: f64) -> Self { - self.multiplier = multiplier; - self - } - - pub fn with_jitter(mut self, jitter_factor: f64) -> Self { - self.jitter_factor = jitter_factor.clamp(0.0, 1.0); - self - } - - /// Calculate the next delay with backoff and jitter - pub fn next_delay( - &self, - default_delay: Duration, - current_delay: Duration, - failed: bool, - ) -> Duration { - let base_delay = if failed { - // Exponential backoff on failure - let next = Duration::from_secs_f64(current_delay.as_secs_f64() * self.multiplier); - next.min(self.max_delay) - } else { - // Reset to initial on success - default_delay - }; - - // Add jitter using a simple LCG (Linear Congruential Generator) - if self.jitter_factor > 0.0 { - // Simple deterministic pseudo-random number generation - let mut state = JITTER_STATE.load(Ordering::Relaxed); - state = state.wrapping_mul(1103515245).wrapping_add(12345); - JITTER_STATE.store(state, Ordering::Relaxed); - - // Convert to 0.0-1.0 range - let normalized = (state as f64) / (u64::MAX as f64); - - // Apply jitter: -jitter_factor to +jitter_factor - let jitter_range = base_delay.as_secs_f64() * self.jitter_factor; - let jitter = (normalized - 0.5) * 2.0 * jitter_range; - let jittered = base_delay.as_secs_f64() + jitter; - Duration::from_secs_f64(jittered.max(0.0)) - } else { - base_delay - } - } -} - -/// Interval-based polling strategy with optional backoff -#[derive(Clone)] -pub struct IntervalStrategy { - interval: Duration, - current_delay: Duration, -} - -pub struct BackoffStrategy { - interval: IntervalStrategy, - backoff_config: BackoffConfig, - default_delay: Duration, -} - -impl IntervalStrategy { - pub fn new(interval: Duration) -> Self { - Self { - interval, - current_delay: interval, - } - } - - pub fn with_backoff(self, config: BackoffConfig) -> BackoffStrategy { - BackoffStrategy { - default_delay: self.interval, - interval: self, - backoff_config: config, - } - } -} - -impl PollStrategy for IntervalStrategy { - type Stream = BoxStream<'static, ()>; - - fn poll_ready(self: Box, ctx: 
&WorkerContext, prev: &Arc) -> Self::Stream { - let interval = self.interval; - stream::unfold((), move |()| { - let fut = futures_timer::Delay::new(interval); - async move { - fut.await; - Some(((), ())) - } - }) - .boxed() - } -} - -impl PollStrategy for BackoffStrategy { - type Stream = BoxStream<'static, ()>; - - fn poll_ready(self: Box, _ctx: &WorkerContext, prev: &Arc) -> Self::Stream { - let backoff_config = self.backoff_config.clone(); - let current_delay = self.interval.interval.clone(); - let default_delay = self.default_delay; - - stream::unfold( - (prev.clone(), current_delay), - move |(prev, mut current_delay)| { - let fut = futures_timer::Delay::new(current_delay); - let backoff_config = backoff_config.clone(); - async move { - fut.await; - let failed = prev.load(Ordering::Relaxed) == 0; - current_delay = backoff_config.next_delay(default_delay, current_delay, failed); - Some(((), (prev, current_delay))) - } - }, - ) - .boxed() - } -} - -#[derive(Clone)] -pub struct FutureStrategy { - future_factory: Arc F + Send>>, -} - -impl FutureStrategy -where - F: Future + Send + 'static, -{ - pub fn new(factory: Factory) -> Self - where - Factory: FnMut(WorkerContext, usize) -> F + Send + 'static, - { - Self { - future_factory: Arc::new(Mutex::new(Box::new(factory))), - } - } -} - -impl PollStrategy for FutureStrategy -where - F: Future + Send + 'static, -{ - type Stream = Pin + Send>>; - fn poll_ready(self: Box, ctx: &WorkerContext, prev: &Arc) -> Self::Stream { - let factory = self.future_factory; - let ctx = ctx.clone(); - - stream::unfold((ctx, prev.clone()), move |(ctx, prev)| { - let factory = factory.clone(); - async move { - let fut = { - let mut lock = factory.try_lock().unwrap(); - (lock)(ctx.clone(), prev.load(Ordering::Relaxed)) - }; - fut.await; - Some(((), (ctx, prev))) - } - }) - .boxed() - } -} diff --git a/apalis-core/src/task/builder.rs b/apalis-core/src/task/builder.rs index af791808..1db0c929 100644 --- a/apalis-core/src/task/builder.rs +++ b/apalis-core/src/task/builder.rs @@ -34,7 +34,9 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; #[derive(Debug)] pub struct TaskBuilder { pub(super) args: Args, - pub(super) ctx: Ctx, + /// The backend context for the task + #[doc(hidden)] + pub ctx: Ctx, pub(super) data: Extensions, pub(super) task_id: Option>, pub(super) attempt: Option, diff --git a/apalis-core/src/task/metadata.rs b/apalis-core/src/task/metadata.rs index 8c1d597c..0a32628c 100644 --- a/apalis-core/src/task/metadata.rs +++ b/apalis-core/src/task/metadata.rs @@ -12,10 +12,19 @@ //! type-safe manner. use crate::task::Task; use crate::task_fn::FromRequest; +use std::ops::Deref; /// Metadata wrapper for task contexts. #[derive(Debug, Clone)] pub struct Meta(pub T); + +impl Deref for Meta { + type Target = T; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + /// Task metadata extension trait and implementations. /// This trait allows for injecting and extracting metadata associated with tasks. 
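+///
+/// A rough sketch of how this is intended to be used, with `MyMeta` standing
+/// in for any serializable metadata type (illustrative only):
+///
+/// ```rust,ignore
+/// // Store workflow metadata inside a task's backend context...
+/// ctx.inject(MyMeta { step: 1 })?;
+/// // ...and read it back later, possibly on a different worker.
+/// let meta: MyMeta = ctx.extract()?;
+/// ```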
pub trait MetadataExt { diff --git a/apalis-sql/src/context.rs b/apalis-sql/src/context.rs index 477f5b55..15f589d7 100644 --- a/apalis-sql/src/context.rs +++ b/apalis-sql/src/context.rs @@ -13,8 +13,8 @@ use serde::{ }; /// The SQL context used for jobs stored in a SQL database -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct SqlContext { +#[derive(Debug, Serialize, Deserialize)] +pub struct SqlContext { max_attempts: i32, last_result: Option, lock_at: Option, @@ -23,15 +23,34 @@ pub struct SqlContext { priority: i32, queue: Option, meta: JsonMapMetadata, + // Marker to hold the Pool type + // Used to associate the context with a specific database pool type + _pool: std::marker::PhantomData, } -impl Default for SqlContext { +impl Clone for SqlContext { + fn clone(&self) -> Self { + Self { + lock_at: self.lock_at, + done_at: self.done_at, + max_attempts: self.max_attempts, + last_result: self.last_result.clone(), + lock_by: self.lock_by.clone(), + priority: self.priority, + queue: self.queue.clone(), + meta: self.meta.clone(), + _pool: std::marker::PhantomData, + } + } +} + +impl Default for SqlContext { fn default() -> Self { Self::new() } } -impl SqlContext { +impl SqlContext { /// Build a new context with defaults #[must_use] pub fn new() -> Self { @@ -44,6 +63,7 @@ impl SqlContext { priority: 0, queue: None, meta: Default::default(), + _pool: std::marker::PhantomData, } } @@ -152,14 +172,16 @@ impl SqlContext { } } -impl FromRequest> for SqlContext { +impl FromRequest> + for SqlContext +{ type Error = Infallible; async fn from_request(req: &Task) -> Result { Ok(req.parts.ctx.clone()) } } -impl MetadataExt for SqlContext { +impl MetadataExt for SqlContext { type Error = serde_json::Error; fn extract(&self) -> Result { self.meta diff --git a/apalis-sql/src/ext.rs b/apalis-sql/src/ext.rs new file mode 100644 index 00000000..4c2f0c11 --- /dev/null +++ b/apalis-sql/src/ext.rs @@ -0,0 +1,24 @@ +use apalis_core::task::builder::TaskBuilder; + +use crate::context::SqlContext; + +/// Extension traits for [`TaskBuilder`] +pub trait TaskBuilderExt { + /// Set the max number of attempts for the task being built. + fn max_attempts(self, attempts: u32) -> Self; + + /// Set the priority for the task being built. 
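+    ///
+    /// A rough sketch of the intended call style (`args` is whatever payload
+    /// your task takes; backend setup is omitted):
+    ///
+    /// ```rust,ignore
+    /// let task = TaskBuilder::new(args)
+    ///     .max_attempts(5)
+    ///     .priority(10);
+    /// ```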
+ fn priority(self, priority: i32) -> Self; +} + +impl TaskBuilderExt for TaskBuilder, IdType> { + fn max_attempts(mut self, attempts: u32) -> Self { + self.ctx = self.ctx.with_max_attempts(attempts as i32); + self + } + + fn priority(mut self, priority: i32) -> Self { + self.ctx = self.ctx.with_priority(priority); + self + } +} diff --git a/apalis-sql/src/from_row.rs b/apalis-sql/src/from_row.rs index 922b79c9..5cf5734e 100644 --- a/apalis-sql/src/from_row.rs +++ b/apalis-sql/src/from_row.rs @@ -58,9 +58,9 @@ pub struct TaskRow { impl TaskRow { /// Convert the TaskRow into a Task with decoded arguments - pub fn try_into_task( + pub fn try_into_task( self, - ) -> Result, FromRowError> + ) -> Result, IdType>, FromRowError> where D::Error: Into + Send + Sync + 'static, IdType: FromStr, @@ -107,9 +107,9 @@ impl TaskRow { } /// Convert the TaskRow into a Task with compacted arguments - pub fn try_into_task_compact( + pub fn try_into_task_compact( self, - ) -> Result, SqlContext, IdType>, FromRowError> + ) -> Result, SqlContext, IdType>, FromRowError> where IdType: FromStr, ::Err: std::error::Error + Send + Sync + 'static, diff --git a/apalis-sql/src/lib.rs b/apalis-sql/src/lib.rs index dd78801c..2d902688 100644 --- a/apalis-sql/src/lib.rs +++ b/apalis-sql/src/lib.rs @@ -8,6 +8,9 @@ pub mod context; /// SQL task row representation and conversion pub mod from_row; +/// Extension traits for `TaskBuilder` +pub mod ext; + /// Convert a string to a StatType #[must_use] pub fn stat_type_from_string(s: &str) -> StatType { diff --git a/apalis-workflow/README.md b/apalis-workflow/README.md index e09c57e7..0ef470a8 100644 --- a/apalis-workflow/README.md +++ b/apalis-workflow/README.md @@ -71,7 +71,7 @@ async fn get_address(user_id: u32) -> Result { } async fn collector( - (name, age, address): (String, usize, usize), + (name, age, address): (String, usize, usize), wrk: WorkerContext, ) -> Result { let result = name.parse::()? 
+ age + address; @@ -89,7 +89,7 @@ async fn main() -> Result<(), BoxDynError> { .await .unwrap(); - let dag_flow = DagFlow::new(); + let dag_flow = DagFlow::new("user-etl-workflow"); let get_name = dag_flow.node(get_name); let get_age = dag_flow.node(get_age); let get_address = dag_flow.node(get_address); @@ -140,7 +140,7 @@ You can track your workflows using [apalis-board](https://github.com/apalis-dev/ ## Inspirations: - [Underway](https://github.com/maxcountryman/underway): Postgres-only `stepped` solution -- [dagx](https://github.com/swaits/dagx): blazing fast *in-memory* `dag` solution +- [dagx](https://github.com/swaits/dagx): blazing fast _in-memory_ `dag` solution ## License diff --git a/apalis-workflow/src/chain/mod.rs b/apalis-workflow/src/composite/mod.rs similarity index 65% rename from apalis-workflow/src/chain/mod.rs rename to apalis-workflow/src/composite/mod.rs index f65d04e4..93b3a7ed 100644 --- a/apalis-workflow/src/chain/mod.rs +++ b/apalis-workflow/src/composite/mod.rs @@ -1,8 +1,8 @@ -// pub struct Chain { +// pub struct Composite { // sub_flow: SubFlow, // } -// impl Step for Chain> { +// impl Step for Composite> { // type Response = Output; // type Error = Infallible; // fn register(&self, ctx: &mut Context<(), ()>) { @@ -10,7 +10,7 @@ // } // } -// impl Step for Chain { +// impl Step for Composite { // type Response = (); // type Error = Infallible; // fn register(&self, ctx: &mut StepRouter<()>) { diff --git a/apalis-workflow/src/dag/executor.rs b/apalis-workflow/src/dag/executor.rs index f71d7832..be512bec 100644 --- a/apalis-workflow/src/dag/executor.rs +++ b/apalis-workflow/src/dag/executor.rs @@ -6,10 +6,7 @@ use std::{ }; use apalis_core::{ - backend::{ - Backend, BackendExt, WaitForCompletion, - codec::{Codec, RawDataBackend}, - }, + backend::{BackendExt, codec::RawDataBackend}, error::BoxDynError, task::{ Task, @@ -17,16 +14,12 @@ use apalis_core::{ }, worker::builder::{IntoWorkerService, WorkerService}, }; -use futures::Sink; -use petgraph::{ - dot::Config, - graph::{DiGraph, NodeIndex}, -}; +use petgraph::graph::{DiGraph, NodeIndex}; use tower::Service; use crate::{ DagFlow, DagService, - dag::{DagFlowContext, RootDagService, error::DagFlowError, response::DagExecutionResponse}, + dag::{DagFlowContext, RootDagService, error::DagFlowError}, id_generator::GenerateId, }; @@ -65,7 +58,7 @@ where B: BackendExt, { /// Get a node by name - fn get_node_by_name_mut( + pub fn get_node_by_name_mut( &mut self, name: &str, ) -> Option<&mut DagService> { diff --git a/apalis-workflow/src/dag/mod.rs b/apalis-workflow/src/dag/mod.rs index 945119eb..567c1814 100644 --- a/apalis-workflow/src/dag/mod.rs +++ b/apalis-workflow/src/dag/mod.rs @@ -6,21 +6,16 @@ use std::{ }; use apalis_core::{ - backend::{ - Backend, BackendExt, WaitForCompletion, - codec::{Codec, RawDataBackend}, - }, + backend::{BackendExt, codec::Codec}, error::BoxDynError, - task::{Task, metadata::MetadataExt}, + task::Task, task_fn::{TaskFn, task_fn}, - worker::builder::{IntoWorkerService, WorkerService}, }; -use futures::Sink; use petgraph::{ Direction, algo::toposort, dot::Config, - graph::{DiGraph, EdgeIndex, Node, NodeIndex}, + graph::{DiGraph, EdgeIndex, NodeIndex}, }; /// DAG executor implementations pub mod executor; @@ -39,12 +34,11 @@ pub mod context; pub mod response; use serde::{Deserialize, Serialize}; -use tower::{Service, ServiceBuilder, util::BoxCloneSyncService}; +use tower::{Service, util::BoxCloneSyncService}; use crate::{ - BoxedService, DagService, + DagService, dag::{error::DagFlowError, 
executor::DagExecutor, node::NodeService}, - id_generator::GenerateId, }; pub use context::DagFlowContext; @@ -56,24 +50,18 @@ pub struct DagFlow where B: BackendExt, { + name: String, graph: Mutex, ()>>, node_mapping: Mutex>, } -impl Default for DagFlow -where - B: BackendExt, -{ - fn default() -> Self { - Self::new() - } -} - impl fmt::Display for DagFlow where B: BackendExt, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "DAG name: {}", self.name)?; + write!(f, "Dot format:\n")?; f.write_str(&self.to_dot()) } } @@ -84,8 +72,9 @@ where { /// Create a new DAG workflow builder #[must_use] - pub fn new() -> Self { + pub fn new(name: &str) -> Self { Self { + name: name.to_owned(), graph: Mutex::new(DiGraph::new()), node_mapping: Mutex::new(HashMap::new()), } @@ -311,6 +300,18 @@ pub struct NodeHandle { pub(crate) _phantom: PhantomData<(Input, Output)>, } +impl NodeHandle { + /// Get the node ID + pub fn id(&self) -> NodeIndex { + self.id + } + + /// Get the edge IDs + pub fn edges(&self) -> &[EdgeIndex] { + &self.edges + } +} + /// Trait for converting dependencies into node IDs pub trait DepsCheck { /// Convert dependencies to node indices @@ -409,31 +410,28 @@ pub enum DagState { #[cfg(test)] mod tests { - use std::{ - collections::HashMap, marker::PhantomData, num::ParseIntError, ops::Range, time::Duration, - }; + use std::num::ParseIntError; use apalis_core::{ error::BoxDynError, - task::{Task, builder::TaskBuilder, task_id::RandomId}, task_fn::task_fn, worker::{ builder::WorkerBuilder, context::WorkerContext, event::Event, ext::event_listener::EventListenerExt, }, }; - use apalis_file_storage::{JsonMapMetadata, JsonStorage}; + use apalis_file_storage::JsonStorage; use petgraph::graph::NodeIndex; use serde::{Deserialize, Serialize}; use serde_json::Value; - use crate::{WorkflowSink, step::Identity, workflow::Workflow}; + use crate::WorkflowSink; use super::*; #[tokio::test] async fn test_basic_workflow() { - let dag = DagFlow::new(); + let dag = DagFlow::new("sequential-workflow"); let start = dag.add_node("start", task_fn(|task: u32| async move { task as usize })); let middle = dag .add_node( @@ -442,7 +440,7 @@ mod tests { ) .depends_on(&start); - let end = dag + let _end = dag .add_node( "end", task_fn(|task: String, worker: WorkerContext| async move { @@ -472,7 +470,7 @@ mod tests { #[tokio::test] async fn test_fan_out_workflow() { - let dag = DagFlow::new(); + let dag = DagFlow::new("fan-out-workflow"); let source = dag.add_node("source", task_fn(|task: u32| async move { task as usize })); let plus_one = dag .add_node("plus_one", task_fn(|task: usize| async move { task + 1 })) @@ -485,7 +483,7 @@ mod tests { .add_node("squared", task_fn(|task: usize| async move { task * task })) .depends_on(&source); - let collector = dag + let _collector = dag .add_node( "collector", task_fn(|task: (usize, usize, usize), w: WorkerContext| async move { @@ -515,7 +513,7 @@ mod tests { #[tokio::test] async fn test_fan_in_workflow() { - let dag = DagFlow::new(); + let dag = DagFlow::new("fan-in-workflow"); let get_name = dag.add_node( "get_name", task_fn(|task: u32| async move { task as usize }), @@ -544,7 +542,7 @@ mod tests { ) .depends_on(vec![&get_name, &get_address]); - let final_node = dag + let _final_node = dag .add_node( "final_node", task_fn(|task: (usize, usize), w: WorkerContext| async move { @@ -577,7 +575,7 @@ mod tests { #[tokio::test] async fn test_routed_workflow() { - let dag = DagFlow::new(); + let dag = DagFlow::new("routed-workflow"); let entry1 = 
dag.add_node("entry1", task_fn(|task: u32| async move { task as usize })); let entry2 = dag.add_node("entry2", task_fn(|task: u32| async move { task as usize })); @@ -611,7 +609,7 @@ mod tests { } let collector = dag.node(collect).depends_on((&entry1, &entry2, &entry3)); - async fn vec_collect(task: Vec, worker: WorkerContext) -> usize { + async fn vec_collect(task: Vec, _wrk: WorkerContext) -> usize { task.iter().sum::() } diff --git a/apalis-workflow/src/dag/response.rs b/apalis-workflow/src/dag/response.rs index aeefab07..1fd751dc 100644 --- a/apalis-workflow/src/dag/response.rs +++ b/apalis-workflow/src/dag/response.rs @@ -16,6 +16,8 @@ pub enum DagExecutionResponse { FanOut { /// Result of the current task response: Compact, + /// Map of node indices to their task IDs + node_task_ids: HashMap>, }, /// Next task has been enqueued EnqueuedNext { diff --git a/apalis-workflow/src/dag/service.rs b/apalis-workflow/src/dag/service.rs index c999c958..d97266bd 100644 --- a/apalis-workflow/src/dag/service.rs +++ b/apalis-workflow/src/dag/service.rs @@ -1,31 +1,25 @@ +use apalis_core::backend::BackendExt; use apalis_core::backend::codec::Codec; -use apalis_core::backend::{self, BackendExt, TaskResult}; use apalis_core::task::builder::TaskBuilder; use apalis_core::task::metadata::Meta; use apalis_core::task::status::Status; -use apalis_core::task::{self, task_id}; use apalis_core::{ - backend::{Backend, WaitForCompletion}, + backend::WaitForCompletion, error::BoxDynError, task::{Task, metadata::MetadataExt, task_id::TaskId}, }; use futures::future::BoxFuture; -use futures::stream::StreamExt; use futures::{FutureExt, Sink, SinkExt}; -use petgraph::graph::{DiGraph, NodeIndex}; -use petgraph::{Direction, graph}; -use serde::{Deserialize, Serialize, de}; -use std::collections::{HashMap, HashSet, VecDeque}; +use petgraph::Direction; +use petgraph::graph::NodeIndex; +use std::collections::HashMap; use std::fmt::Debug; -use std::future::Future; -use std::pin::Pin; -use std::task::{Context, Poll}; use tower::Service; +use crate::DagExecutor; use crate::dag::context::DagFlowContext; use crate::dag::response::DagExecutionResponse; use crate::id_generator::GenerateId; -use crate::{DagExecutor, DagService}; /// Service that manages the execution of a DAG workflow pub struct RootDagService @@ -106,7 +100,7 @@ where .extract::>>() .await; let (response, context) = match ctx { - Ok(Meta(mut context)) => { + Ok(Meta(context)) => { #[cfg(feature = "tracing")] tracing::debug!( node = ?context.current_node, @@ -148,7 +142,7 @@ where dependency_task_ids.values().cloned().collect::>(), ) .await?; - if (results.iter().all(|s| matches!(s.status, Status::Done))) { + if results.iter().all(|s| matches!(s.status, Status::Done)) { let sorted_results = { // Match the order of incoming_nodes by matching NodeIndex let res =incoming_nodes @@ -170,7 +164,7 @@ where .collect::, BoxDynError>>(); match res { Ok(v) => v, - Err(e) => return Ok(DagExecutionResponse::WaitingForDependencies { pending_dependencies: dependency_task_ids }), + Err(_) => return Ok(DagExecutionResponse::WaitingForDependencies { pending_dependencies: dependency_task_ids }), } }; let encoded_input = B::Codec::encode( @@ -188,7 +182,7 @@ where ) })?; match decoded { - DagExecutionResponse::FanOut { response } => { + DagExecutionResponse::FanOut { response, .. 
} => {
                                return Ok(response);
                            }
                            DagExecutionResponse::EnqueuedNext { result } => {
@@ -212,7 +206,7 @@ where
                                .collect::, String>>()?,
                            )
                            .map_err(|e| e.into())?;
-                    let req = req.map(|args| encoded_input); // Replace args with fan-in input
+                    let req = req.map(|_| encoded_input); // Replace args with fan-in input
                     let response = executor.call(req).await?;
                     (response, context)
                 } else {
@@ -224,7 +218,7 @@ where
                     }
                 }
-            Err(e) => {
+            Err(_) => {
                 #[cfg(feature = "tracing")]
                 tracing::debug!(
                     "Extracting DagFlowContext for task without meta"
@@ -235,7 +229,7 @@ where
                             #[cfg(feature = "tracing")]
                             tracing::debug!("Single start node detected, proceeding with execution");
                             let context = DagFlowContext::new(req.parts.task_id.clone());
-                            let task_id = req.parts.task_id.clone();
                             req.parts
                                 .ctx
                                 .inject(context.clone())
@@ -270,6 +263,7 @@ where
             match outgoing_nodes.len() {
                 0 => {
+                    assert!(end_nodes.contains(&current_node), "Current node is not an end node");
                     // This was an end node
                     return Ok(DagExecutionResponse::Complete { result: response });
                 }
@@ -305,6 +299,7 @@ where
                 .await?;
                 return Ok(DagExecutionResponse::FanOut {
                     response,
+                    node_task_ids: next_task_ids,
                 });
             }
         }
@@ -315,7 +310,7 @@
 }
 
 async fn fan_out_next_nodes(
-    executor: &DagExecutor,
+    _executor: &DagExecutor,
     outgoing_nodes: Vec,
     backend: &mut B,
     context: &mut DagFlowContext,
@@ -386,7 +381,7 @@ where
 {
     let values: Vec = B::Codec::decode(input).map_err(|e: CdcErr| e.into())?;
     let start_nodes = executor.start_nodes.clone();
-    if (values.len() != start_nodes.len()) {
+    if values.len() != start_nodes.len() {
         return Err(BoxDynError::from(format!(
             "Expected {} inputs for fan-in, got {}",
             start_nodes.len(),
diff --git a/apalis-workflow/src/lib.rs b/apalis-workflow/src/lib.rs
index c9c64aa8..d5c73d82 100644
--- a/apalis-workflow/src/lib.rs
+++ b/apalis-workflow/src/lib.rs
@@ -9,7 +9,7 @@
 use apalis_core::{error::BoxDynError, task::Task};
 
-use crate::router::{GoTo, StepResult};
+use crate::sequential::router::{GoTo, StepResult};
 
 type BoxedService = tower::util::BoxCloneSyncService;
 type SteppedService =
@@ -17,41 +17,29 @@
 type DagService = BoxedService, Compact>;
 
-/// combinator for sequential workflow execution.
-pub mod and_then;
 /// combinator for chaining multiple workflows.
-pub mod chain;
-/// utilities for workflow context management.
-pub mod context;
+pub mod composite;
 /// utilities for directed acyclic graph workflows.
-#[allow(unused)]
 pub mod dag;
-/// utilities for introducing delays in workflow execution.
-pub mod delay;
-/// combinator for filtering and mapping workflow items.
-pub mod filter_map;
-/// combinator for folding over workflow items.
-pub mod fold;
 mod id_generator;
-/// utilities for workflow routing.
-pub mod router;
-/// utilities for workflow service orchestration.
-pub mod service;
+/// utilities for workflow steps.
+pub mod sequential;
 /// utilities for workflow sinks.
 pub mod sink;
-/// utilities for workflow steps.
-pub mod step;
-/// workflow definitions.
-pub mod workflow; -pub use {dag::DagFlow, dag::executor::DagExecutor, sink::WorkflowSink, workflow::Workflow}; +pub use { + dag::DagFlow, dag::executor::DagExecutor, sequential::workflow::Workflow, sink::WorkflowSink, +}; #[cfg(test)] mod tests { use std::{collections::HashMap, time::Duration}; use apalis_core::{ - task::{builder::TaskBuilder, task_id::TaskId}, + task::{ + builder::TaskBuilder, + task_id::{RandomId, TaskId}, + }, task_fn::task_fn, worker::{ builder::WorkerBuilder, context::WorkerContext, event::Event, @@ -62,7 +50,7 @@ mod tests { use futures::SinkExt; use serde_json::Value; - use crate::{and_then::AndThen, workflow::Workflow}; + use crate::sequential::{AndThen, repeat_until::RepeaterState, workflow::Workflow}; use super::*; @@ -73,7 +61,17 @@ mod tests { .delay_for(Duration::from_secs(1)) .and_then(async |input: usize| (input) as usize) .delay_for(Duration::from_secs(1)) - // .delay_with(|_: Task| Duration::from_secs(1)) + .delay_with(|_| Duration::from_secs(1)) + .repeat_until(|res: usize, state: RepeaterState| async move { + println!("Iteration {}: got result {}", state.iterations(), res); + // Repeat until we have iterated 3 times + // Of course, in a real-world scenario, the condition would be based on `res` + if state.iterations() < 3 { + None + } else { + Some(res) + } + }) .add_step(AndThen::new(task_fn(async |input: usize| { Ok::<_, BoxDynError>(input.to_string()) }))) diff --git a/apalis-workflow/src/repeat_until/mod.rs b/apalis-workflow/src/repeat_until/mod.rs deleted file mode 100644 index 472e4826..00000000 --- a/apalis-workflow/src/repeat_until/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -pub struct RepeatUntil { - repeat: F, -} - -impl Step for RepeatUntil -where - F: Service, Response = Option> + Send + 'static + Clone, -{ - type Response = Res; - type Error = F::Error; - fn register(&self, ctx: &mut Context<(), ()>) { - // TODO - } -} diff --git a/apalis-workflow/src/and_then/mod.rs b/apalis-workflow/src/sequential/and_then/mod.rs similarity index 96% rename from apalis-workflow/src/and_then/mod.rs rename to apalis-workflow/src/sequential/and_then/mod.rs index 051bf0fb..34f91774 100644 --- a/apalis-workflow/src/and_then/mod.rs +++ b/apalis-workflow/src/sequential/and_then/mod.rs @@ -14,12 +14,12 @@ use tower::{Service, ServiceBuilder, layer::layer_fn}; use crate::{ SteppedService, - context::{StepContext, WorkflowContext}, id_generator::GenerateId, - router::{GoTo, StepResult, WorkflowRouter}, - service::handle_step_result, - step::{Layer, Stack, Step}, - workflow::Workflow, + sequential::context::{StepContext, WorkflowContext}, + sequential::router::{GoTo, StepResult, WorkflowRouter}, + sequential::service::handle_step_result, + sequential::step::{Layer, Stack, Step}, + sequential::workflow::Workflow, }; /// A layer that represents an `and_then` step in the workflow. 
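The test above drives the new `repeat_until` combinator end to end. The contract implied by `RepeatUntilService` (introduced later in this patch) is: the repeater receives the step's decoded result plus the current `RepeaterState`; returning `None` re-enqueues the same step with `iterations` incremented, while returning `Some(output)` routes the output to the next step, or completes the workflow when none remains. A minimal standalone repeater, sketched under those assumptions; the function name and stop condition are invented, and `RepeaterState`'s generic parameters are elided as elsewhere in this patch:

use apalis_workflow::sequential::repeat_until::RepeaterState;

// Hypothetical repeater: retry until the step yields an even value,
// giving up and passing the result through after five iterations.
// `None` repeats the step; `Some(res)` hands `res` to the next step.
async fn until_even(res: usize, state: RepeaterState) -> Option<usize> {
    if res % 2 == 0 || state.iterations() >= 5 {
        Some(res)
    } else {
        None
    }
}

It plugs in exactly like the closure in the test: `.repeat_until(until_even)`.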
diff --git a/apalis-workflow/src/context.rs b/apalis-workflow/src/sequential/context.rs similarity index 100% rename from apalis-workflow/src/context.rs rename to apalis-workflow/src/sequential/context.rs diff --git a/apalis-workflow/src/delay/mod.rs b/apalis-workflow/src/sequential/delay/mod.rs similarity index 97% rename from apalis-workflow/src/delay/mod.rs rename to apalis-workflow/src/sequential/delay/mod.rs index d92fecf3..e17085fb 100644 --- a/apalis-workflow/src/delay/mod.rs +++ b/apalis-workflow/src/sequential/delay/mod.rs @@ -11,10 +11,10 @@ use tower::Service; use crate::{ SteppedService, Workflow, - context::{StepContext, WorkflowContext}, + sequential::context::{StepContext, WorkflowContext}, id_generator::GenerateId, - router::{GoTo, StepResult, WorkflowRouter}, - step::{Layer, Stack, Step}, + sequential::router::{GoTo, StepResult, WorkflowRouter}, + sequential::step::{Layer, Stack, Step}, }; /// Layer that delays execution by a specified duration diff --git a/apalis-workflow/src/filter_map/mod.rs b/apalis-workflow/src/sequential/filter_map/mod.rs similarity index 97% rename from apalis-workflow/src/filter_map/mod.rs rename to apalis-workflow/src/sequential/filter_map/mod.rs index f047ebb6..f23aec2e 100644 --- a/apalis-workflow/src/filter_map/mod.rs +++ b/apalis-workflow/src/sequential/filter_map/mod.rs @@ -12,12 +12,12 @@ use tower::Service; use crate::{ SteppedService, - context::{StepContext, WorkflowContext}, id_generator::GenerateId, - router::{GoTo, StepResult, WorkflowRouter}, - service::handle_step_result, - step::{Layer, Stack, Step}, - workflow::Workflow, + sequential::context::{StepContext, WorkflowContext}, + sequential::router::{GoTo, StepResult, WorkflowRouter}, + sequential::service::handle_step_result, + sequential::step::{Layer, Stack, Step}, + sequential::workflow::Workflow, }; /// A layer that filters and maps task inputs to outputs. @@ -151,7 +151,7 @@ where let main_args: Vec = vec![]; let steps: Task = request.try_map(|arg| B::Codec::decode(&arg))?; let steps = steps.args.into_iter().collect::>(); - println!("Decoded steps: {:?}", steps.len()); + tracing::debug!(step_count = ?steps.len(), "Enqueuing FilterMap steps"); let mut task_ids = Vec::new(); for step in steps { let task_id = TaskId::new(B::IdType::generate()); diff --git a/apalis-workflow/src/fold/mod.rs b/apalis-workflow/src/sequential/fold/mod.rs similarity index 98% rename from apalis-workflow/src/fold/mod.rs rename to apalis-workflow/src/sequential/fold/mod.rs index 652ab94c..c13ee1da 100644 --- a/apalis-workflow/src/fold/mod.rs +++ b/apalis-workflow/src/sequential/fold/mod.rs @@ -12,11 +12,11 @@ use tower::Service; use crate::{ SteppedService, - context::{StepContext, WorkflowContext}, + sequential::context::{StepContext, WorkflowContext}, id_generator::GenerateId, - router::{GoTo, StepResult, WorkflowRouter}, - step::{Layer, Stack, Step}, - workflow::Workflow, + sequential::router::{GoTo, StepResult, WorkflowRouter}, + sequential::step::{Layer, Stack, Step}, + sequential::workflow::Workflow, }; /// The fold layer that folds over a collection of items. diff --git a/apalis-workflow/src/sequential/mod.rs b/apalis-workflow/src/sequential/mod.rs new file mode 100644 index 00000000..f4d5ac75 --- /dev/null +++ b/apalis-workflow/src/sequential/mod.rs @@ -0,0 +1,31 @@ +/// combinator for sequential workflow execution. +pub mod and_then; + +/// utilities for workflow context management. +pub mod context; +/// utilities for introducing delays in workflow execution. 
+pub mod delay; +/// combinator for filtering and mapping workflow items. +pub mod filter_map; +/// combinator for folding over workflow items. +pub mod fold; +/// combinator for repeating a workflow step until a condition is met. +pub mod repeat_until; +/// utilities for workflow routing. +pub mod router; +/// utilities for workflow service orchestration. +pub mod service; +/// workflow definitions. +pub mod workflow; + +/// utilities for workflow steps. +pub mod step; + +pub use crate::sequential::and_then::AndThen; +pub use crate::sequential::context::{StepContext, WorkflowContext}; +pub use crate::sequential::delay::DelayFor; +pub use crate::sequential::filter_map::FilterMap; +pub use crate::sequential::fold::Fold; +pub use crate::sequential::router::{GoTo, StepResult}; +pub use crate::sequential::step::{Layer, Stack, Step}; +pub use crate::sequential::workflow::Workflow; diff --git a/apalis-workflow/src/sequential/repeat_until/mod.rs b/apalis-workflow/src/sequential/repeat_until/mod.rs new file mode 100644 index 00000000..3d8d221b --- /dev/null +++ b/apalis-workflow/src/sequential/repeat_until/mod.rs @@ -0,0 +1,285 @@ +use std::convert::Infallible; +use std::marker::PhantomData; +use std::task::Context; + +use apalis_core::backend::TaskSinkError; +use apalis_core::backend::codec::Codec; +use apalis_core::error::BoxDynError; +use apalis_core::task::builder::TaskBuilder; +use apalis_core::task::metadata::MetadataExt; +use apalis_core::task::task_id::TaskId; +use apalis_core::task_fn::{FromRequest, TaskFn, task_fn}; +use apalis_core::{backend::BackendExt, task::Task}; +use futures::future::BoxFuture; +use futures::{FutureExt, Sink, SinkExt}; +use serde::{Deserialize, Serialize}; +use tower::Service; + +use crate::id_generator::GenerateId; +use crate::sequential::router::WorkflowRouter; +use crate::sequential::{GoTo, Layer, Stack, Step, StepContext, StepResult, WorkflowContext}; +use crate::{SteppedService, Workflow}; + +/// A layer that represents a `repeat_until` step in the workflow. +#[derive(Clone, Debug)] +pub struct RepeatUntil { + repeater: F, + _marker: PhantomData<(Input, Output)>, +} + +impl Layer for RepeatUntil +where + F: Clone, +{ + type Step = RepeatUntilStep; + + fn layer(&self, step: S) -> Self::Step { + RepeatUntilStep { + inner: step, + repeater: self.repeater.clone(), + _marker: std::marker::PhantomData, + } + } +} +impl Workflow { + /// Repeats the previous step until the repeater returns `Some(output)`, re-enqueuing it on `None`. + pub fn repeat_until( + self, + repeater: F, + ) -> Workflow< + Start, + Output, + B, + Stack, Input, Output>, L>, + > + where + TaskFn: + Service, Response = Option>, + { + self.add_step(RepeatUntil { + repeater: task_fn(repeater), + _marker: PhantomData::<(Input, Output)>, + }) + } +} + +/// The step implementation for the `repeat_until` layer.
+#[derive(Clone, Debug)] +pub struct RepeatUntilStep { + inner: S, + repeater: R, + _marker: PhantomData<(Input, Output)>, +} + +/// The service that handles the `repeat_until` logic +#[derive(Debug)] +pub struct RepeatUntilService { + repeater: F, + _marker: std::marker::PhantomData<(B, Input, Output)>, +} + +impl Clone for RepeatUntilService +where + F: Clone, +{ + fn clone(&self) -> Self { + Self { + repeater: self.repeater.clone(), + _marker: std::marker::PhantomData, + } + } +} + +impl Service> + for RepeatUntilService +where + F: Service, Response = Option> + Send + 'static + Clone, + B: BackendExt + + Send + + Sync + + Clone + + Sink, Error = Err> + + Unpin + + 'static, + B::Context: MetadataExt, Error = MetaErr> + + MetadataExt + + Send + + 'static, + B::Codec: Codec + + Codec + + Codec, Error = CodecError, Compact = B::Compact> + + 'static, + B::IdType: GenerateId + Send + 'static, + Err: std::error::Error + Send + Sync + 'static, + CodecError: std::error::Error + Send + Sync + 'static, + F::Error: Into + Send + 'static, + MetaErr: std::error::Error + Send + Sync + 'static, + F::Future: Send + 'static, + B::Compact: Send + 'static, + Input: Send + 'static, // We don't need Clone because decoding just needs a reference + Res: Send + 'static, +{ + type Response = GoTo>; + type Error = BoxDynError; + type Future = BoxFuture<'static, Result>; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> std::task::Poll> { + self.repeater.poll_ready(cx).map_err(|e| e.into()) + } + + fn call(&mut self, task: Task) -> Self::Future { + let state: RepeaterState = task.parts.ctx.extract().unwrap_or_default(); + let mut ctx = + task.parts.data.get::>().cloned().expect( + "StepContext missing, Did you call the repeater outside of a workflow step?", + ); + let mut repeater = self.repeater.clone(); + + let fut = async move { + let mut compact = None; + let decoded: Input = B::Codec::decode(&task.args)?; + let prev_task_id = task.parts.task_id.clone(); + let repeat_task = task.map(|c| { + compact = Some(c); + decoded + }); + let response = repeater.call(repeat_task).await.map_err(|e| e.into())?; + Ok(match response { + Some(res) if ctx.has_next => { + let task_id = TaskId::new(B::IdType::generate()); + let next_step = TaskBuilder::new(B::Codec::encode(&res)?) 
+ .with_task_id(task_id.clone()) + .meta(WorkflowContext { + step_index: ctx.current_step + 1, + }) + .build(); + ctx.backend + .send(next_step) + .await + .map_err(|e| TaskSinkError::PushError(e))?; + GoTo::Next(StepResult { + result: B::Codec::encode(&res)?, + next_task_id: Some(task_id), + }) + } + Some(res) => GoTo::Break(StepResult { + result: B::Codec::encode(&res)?, + next_task_id: None, + }), + None => { + let task_id = TaskId::new(B::IdType::generate()); + let next_step = + TaskBuilder::new(compact.take().expect("Compact args should be set")) + .with_task_id(task_id.clone()) + .meta(WorkflowContext { + step_index: ctx.current_step, + }) + .meta(RepeaterState { + iterations: state.iterations + 1, + prev_task_id, + }) + .build(); + ctx.backend + .send(next_step) + .await + .map_err(|e| TaskSinkError::PushError(e))?; + GoTo::Break(StepResult { + result: B::Codec::encode(&None::)?, + next_task_id: Some(task_id), + }) + } + }) + } + .boxed(); + + fut + } +} + +/// The state of the `repeat_until` operation +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct RepeaterState { + iterations: usize, + prev_task_id: Option>, +} + +impl Default for RepeaterState { + fn default() -> Self { + Self { + iterations: 0, + prev_task_id: None, + } + } +} + +impl RepeaterState { + /// Get the number of iterations completed so far. + pub fn iterations(&self) -> usize { + self.iterations + } + + /// Get the previous task id. + pub fn previous_task_id(&self) -> Option<&TaskId> { + self.prev_task_id.as_ref() + } +} + +impl> + Sync, IdType: Sync> + FromRequest> for RepeaterState +{ + type Error = Infallible; + async fn from_request(task: &Task) -> Result { + let state: RepeaterState = task.parts.ctx.extract().unwrap_or_default(); + Ok(RepeaterState { + iterations: state.iterations, + prev_task_id: state.prev_task_id, + }) + } +} + +impl Step + for RepeatUntilStep +where + F: Service, Response = Option> + + Send + + Sync + + 'static + + Clone, + B: BackendExt + + Send + + Sync + + Clone + + Sink, Error = Err> + + Unpin + + 'static, + B::Context: MetadataExt, Error = MetaErr> + + MetadataExt + + Send + + 'static, + B::Codec: Codec + + Codec + + Codec, Error = CodecError, Compact = B::Compact> + + 'static, + B::IdType: GenerateId + Send + 'static, + Err: std::error::Error + Send + Sync + 'static, + CodecError: std::error::Error + Send + Sync + 'static, + F::Error: Into + Send + 'static, + MetaErr: std::error::Error + Send + Sync + 'static, + F::Future: Send + 'static, + B::Compact: Send + 'static, + Input: Send + Sync + 'static, // We don't need Clone because decoding just needs a reference + Res: Send + Sync + 'static, + S: Step + Send + 'static, +{ + type Response = Res; + type Error = F::Error; + fn register(&mut self, ctx: &mut WorkflowRouter) -> Result<(), BoxDynError> { + let svc = SteppedService::new(RepeatUntilService { + repeater: self.repeater.clone(), + _marker: PhantomData::<(B, Input, Res)>, + }); + let count = ctx.steps.len(); + ctx.steps.insert(count, svc); + self.inner.register(ctx) + } +} diff --git a/apalis-workflow/src/router.rs b/apalis-workflow/src/sequential/router.rs similarity index 100% rename from apalis-workflow/src/router.rs rename to apalis-workflow/src/sequential/router.rs diff --git a/apalis-workflow/src/service.rs b/apalis-workflow/src/sequential/service.rs similarity index 98% rename from apalis-workflow/src/service.rs rename to apalis-workflow/src/sequential/service.rs index 113fa51e..55148685 100644 --- a/apalis-workflow/src/service.rs +++ 
b/apalis-workflow/src/sequential/service.rs @@ -14,9 +14,9 @@ use tower::Service; use crate::{ SteppedService, - context::{StepContext, WorkflowContext}, id_generator::GenerateId, - router::{GoTo, StepResult}, + sequential::context::{StepContext, WorkflowContext}, + sequential::router::{GoTo, StepResult}, }; /// The main workflow service that orchestrates the execution of workflow steps. diff --git a/apalis-workflow/src/step.rs b/apalis-workflow/src/sequential/step.rs similarity index 96% rename from apalis-workflow/src/step.rs rename to apalis-workflow/src/sequential/step.rs index 2f67e13b..62619043 100644 --- a/apalis-workflow/src/step.rs +++ b/apalis-workflow/src/sequential/step.rs @@ -1,6 +1,6 @@ use apalis_core::{backend::BackendExt, error::BoxDynError}; -use crate::router::WorkflowRouter; +use crate::sequential::router::WorkflowRouter; /// A layer to wrap a step pub trait Layer { diff --git a/apalis-workflow/src/workflow.rs b/apalis-workflow/src/sequential/workflow.rs similarity index 90% rename from apalis-workflow/src/workflow.rs rename to apalis-workflow/src/sequential/workflow.rs index d89d74e6..a7a2caa8 100644 --- a/apalis-workflow/src/workflow.rs +++ b/apalis-workflow/src/sequential/workflow.rs @@ -9,11 +9,11 @@ use apalis_core::{ use futures::Sink; use crate::{ - context::WorkflowContext, id_generator::GenerateId, - router::WorkflowRouter, - service::WorkflowService, - step::{Identity, Layer, Stack, Step}, + sequential::context::WorkflowContext, + sequential::router::WorkflowRouter, + sequential::service::WorkflowService, + sequential::step::{Identity, Layer, Stack, Step}, }; /// A workflow represents a sequence of steps to be executed in order. @@ -38,6 +38,11 @@ impl Workflow { impl Workflow { /// Adds a new step to the workflow pipeline. + /// + /// This method should be used with caution, as it allows adding arbitrary steps + /// and manipulating types. It is recommended to use higher-level abstractions for + /// common workflow patterns. + #[must_use] pub fn add_step(self, step: S) -> Workflow> { Workflow { inner: Stack::new(step, self.inner), diff --git a/apalis-workflow/src/sink.rs b/apalis-workflow/src/sink.rs index 9143b86d..fb342ded 100644 --- a/apalis-workflow/src/sink.rs +++ b/apalis-workflow/src/sink.rs @@ -4,43 +4,72 @@ use apalis_core::{ task::{Task, builder::TaskBuilder, metadata::MetadataExt, task_id::TaskId}, }; use futures::Sink; +use petgraph::graph::NodeIndex; -use crate::{context::WorkflowContext, id_generator::GenerateId}; +use crate::{sequential::WorkflowContext, dag::DagFlowContext, id_generator::GenerateId}; /// Extension trait for pushing tasks into a workflow pub trait WorkflowSink: BackendExt where Self::Codec: Codec, { - /// Push a step into the workflow sink at the start + /// Push a task into the workflow sink at the start fn push_start( &mut self, - step: Args, - ) -> impl Future>> + Send { - self.push_step(step, 0) - } + args: Args, + ) -> impl Future>> + Send; /// Push a step into the workflow sink at the specified index + /// + /// This is a helper method for pushing tasks into the workflow sink + /// with the appropriate workflow context metadata. + /// Ideally, this should be used internally by the workflow executor + /// rather than being called directly. fn push_step( &mut self, step: Args, index: usize, ) -> impl Future>> + Send; + + /// Push a node into the workflow sink at the specified index + /// + /// This is a helper method for pushing tasks into the workflow sink + /// with the appropriate DAG flow context metadata. 
+ /// Ideally, this should be used internally by the DAG executor + /// rather than being called directly. + fn push_node( + &mut self, + node: Args, + index: NodeIndex, + ) -> impl Future>> + Send; } -impl WorkflowSink for S +impl WorkflowSink for S where S: Sink, Error = Err> + BackendExt + Unpin, S::IdType: GenerateId + Send, S::Codec: Codec, - S::Context: MetadataExt + Send, + S::Context: MetadataExt + + MetadataExt, Error = MetaErr> + + Send, Err: std::error::Error + Send + Sync + 'static, >::Error: Into + Send + Sync + 'static, - >::Error: Into + Send + Sync + 'static, + MetaErr: Into + Send + Sync + 'static, Compact: Send + 'static, { + async fn push_start(&mut self, args: Args) -> Result<(), TaskSinkError> { + use futures::SinkExt; + let task_id = TaskId::new(S::IdType::generate()); + let compact = S::Codec::encode(&args).map_err(|e| TaskSinkError::CodecError(e.into()))?; + let task = TaskBuilder::new(compact) + .with_task_id(task_id.clone()) + .build(); + self.send(task) + .await + .map_err(|e| TaskSinkError::PushError(e)) + } async fn push_step( &mut self, step: Args, @@ -57,4 +86,29 @@ where .await .map_err(|e| TaskSinkError::PushError(e)) } + + async fn push_node( + &mut self, + node: Args, + index: NodeIndex, + ) -> Result<(), TaskSinkError> { + use futures::SinkExt; + let task_id = TaskId::new(S::IdType::generate()); + let compact = S::Codec::encode(&node).map_err(|e| TaskSinkError::CodecError(e.into()))?; + let task = TaskBuilder::new(compact) + .meta(DagFlowContext { + current_node: index, + completed_nodes: Default::default(), + current_position: index.index(), + is_initial: true, + node_task_ids: Default::default(), + prev_node: None, + root_task_id: Some(task_id.clone()), + }) + .with_task_id(task_id.clone()) + .build(); + self.send(task) + .await + .map_err(|e| TaskSinkError::PushError(e)) + } } diff --git a/examples/dag-workflow/src/main.rs b/examples/dag-workflow/src/main.rs index ae35ee15..cecb05df 100644 --- a/examples/dag-workflow/src/main.rs +++ b/examples/dag-workflow/src/main.rs @@ -37,7 +37,7 @@ async fn main() -> Result<(), BoxDynError> { .await .unwrap(); - let dag_flow = DagFlow::new(); + let dag_flow = DagFlow::new("user-info-workflow"); let get_name = dag_flow.node(get_name); let get_age = dag_flow.node(get_age); let get_address = dag_flow.node(get_address); From 79591863be0af8e192aeaa7f218b68b7ba2bfa98 Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Tue, 23 Dec 2025 10:03:39 +0300 Subject: [PATCH 11/12] chore: clippy and lints --- apalis-core/src/worker/context.rs | 6 +- apalis-sql/src/ext.rs | 2 + apalis-workflow/src/dag/executor.rs | 4 +- apalis-workflow/src/dag/mod.rs | 4 +- apalis-workflow/src/dag/service.rs | 290 +++++++++--------- apalis-workflow/src/id_generator.rs | 10 +- apalis-workflow/src/sequential/delay/mod.rs | 4 +- .../src/sequential/filter_map/mod.rs | 2 +- apalis-workflow/src/sequential/fold/mod.rs | 2 +- .../src/sequential/repeat_until/mod.rs | 14 +- apalis-workflow/src/sink.rs | 2 +- utils/apalis-file-storage/src/util.rs | 2 +- 12 files changed, 163 insertions(+), 179 deletions(-) diff --git a/apalis-core/src/worker/context.rs b/apalis-core/src/worker/context.rs index 3f6e9cc7..8789bc94 100644 --- a/apalis-core/src/worker/context.rs +++ b/apalis-core/src/worker/context.rs @@ -273,11 +273,7 @@ impl WorkerContext { /// Is the shutdown token called #[must_use] pub fn is_shutting_down(&self) -> bool { - self.is_stopped() - || self - .shutdown - .as_ref() - .map_or(false, |s| s.is_shutting_down()) + self.is_stopped() || 
self.shutdown.as_ref().is_some_and(|s| s.is_shutting_down()) } /// Allows workers to emit events diff --git a/apalis-sql/src/ext.rs b/apalis-sql/src/ext.rs index 4c2f0c11..c3587a5b 100644 --- a/apalis-sql/src/ext.rs +++ b/apalis-sql/src/ext.rs @@ -5,9 +5,11 @@ use crate::context::SqlContext; /// Extension traits for [`TaskBuilder`] pub trait TaskBuilderExt { /// Set the max number of attempts for the task being built. + #[must_use] fn max_attempts(self, attempts: u32) -> Self; /// Set the priority for the task being built. + #[must_use] fn priority(self, priority: i32) -> Self; } diff --git a/apalis-workflow/src/dag/executor.rs b/apalis-workflow/src/dag/executor.rs index be512bec..7e567ae4 100644 --- a/apalis-workflow/src/dag/executor.rs +++ b/apalis-workflow/src/dag/executor.rs @@ -93,7 +93,7 @@ where .node_weight_mut(self.not_ready[0]) .unwrap() .poll_ready(cx) - .map_err(|e| DagFlowError::Service(e))? + .map_err(DagFlowError::Service)? .is_pending() { return Poll::Pending; @@ -119,7 +119,7 @@ where .node_weight_mut(context.current_node) .ok_or_else(|| DagFlowError::MissingService(context.current_node))?; - let result = service.call(req).await.map_err(|e| DagFlowError::Node(e))?; + let result = service.call(req).await.map_err(DagFlowError::Node)?; Ok(result) }) diff --git a/apalis-workflow/src/dag/mod.rs b/apalis-workflow/src/dag/mod.rs index 567c1814..264e4369 100644 --- a/apalis-workflow/src/dag/mod.rs +++ b/apalis-workflow/src/dag/mod.rs @@ -61,7 +61,7 @@ where { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "DAG name: {}", self.name)?; - write!(f, "Dot format:\n")?; + writeln!(f, "Dot format:")?; f.write_str(&self.to_dot()) } } @@ -302,11 +302,13 @@ pub struct NodeHandle { impl NodeHandle { /// Get the node ID + #[must_use] pub fn id(&self) -> NodeIndex { self.id } /// Get the edge IDs + #[must_use] pub fn edges(&self) -> &[EdgeIndex] { &self.edges } diff --git a/apalis-workflow/src/dag/service.rs b/apalis-workflow/src/dag/service.rs index d97266bd..5509c38e 100644 --- a/apalis-workflow/src/dag/service.rs +++ b/apalis-workflow/src/dag/service.rs @@ -99,159 +99,145 @@ where let ctx = req .extract::>>() .await; - let (response, context) = match ctx { - Ok(Meta(context)) => { - #[cfg(feature = "tracing")] - tracing::debug!( - node = ?context.current_node, - "Extracted DagFlowContext for task" - ); - let incoming_nodes = executor - .graph - .neighbors_directed(context.current_node, Direction::Incoming) - .collect::>(); - match incoming_nodes.len() { - // Entry node - 0 if start_nodes.len() == 1 => { - let response = executor.call(req).await?; - (response, context) - } - // Entry node with multiple start nodes - 0 if start_nodes.len() > 1 => { - let response = executor.call(req).await?; - (response, context) - } - // Single incoming node, proceed normally - 1 => { - let response = executor.call(req).await?; - (response, context) - } - // Multiple incoming nodes, fan-in scenario - _ => { - let dependency_task_ids = - context.get_dependency_task_ids(&incoming_nodes); - #[cfg(feature = "tracing")] - tracing::debug!( - prev_node = ?context.prev_node, - node = ?context.current_node, - deps = ?dependency_task_ids, - "Fanning in from multiple dependencies", - ); - let results = backend - .check_status( - dependency_task_ids.values().cloned().collect::>(), - ) - .await?; - if results.iter().all(|s| matches!(s.status, Status::Done)) { - let sorted_results = { - // Match the order of incoming_nodes by matching NodeIndex - let res =incoming_nodes - .iter() - .rev() - 
.map(|node_index| { - let task_id = context.node_task_ids - .iter() - .find(|(n, _)| *n == node_index) - .map(|(_, task_id)| task_id) - .ok_or(BoxDynError::from("TaskId for incoming node not found"))?; - let task_result = results.iter().find(|r| &r.task_id == task_id).ok_or( - BoxDynError::from(format!( - "TaskResult for task_id {:?} not found", - task_id - )))?; - Ok(task_result) - }) - .collect::, BoxDynError>>(); - match res { - Ok(v) => v, - Err(_) => return Ok(DagExecutionResponse::WaitingForDependencies { pending_dependencies: dependency_task_ids }), - } - }; - let encoded_input = B::Codec::encode( - &sorted_results - .iter() - .map(|s| match &s.result { - Ok(val) => { - let decoded: DagExecutionResponse< - B::Compact, - B::IdType, - > = B::Codec::decode(val).map_err(|e: CdcErr| { - format!( - "Failed to decode dependency result: {:?}", - e.into() - ) - })?; - match decoded { - DagExecutionResponse::FanOut { response, .. } => { - return Ok(response); - } - DagExecutionResponse::EnqueuedNext { result } => { - return Ok(result); - } - DagExecutionResponse::Complete { result } => { - Ok(result) - } - _ => Err(format!( - "Dependency task returned invalid response, which is unexpected during fan-in" - )) + let (response, context) = if let Ok(Meta(context)) = ctx { + #[cfg(feature = "tracing")] + tracing::debug!( + node = ?context.current_node, + "Extracted DagFlowContext for task" + ); + let incoming_nodes = executor + .graph + .neighbors_directed(context.current_node, Direction::Incoming) + .collect::>(); + match incoming_nodes.len() { + // Entry node + 0 if start_nodes.len() == 1 => { + let response = executor.call(req).await?; + (response, context) + } + // Entry node with multiple start nodes + 0 if start_nodes.len() > 1 => { + let response = executor.call(req).await?; + (response, context) + } + // Single incoming node, proceed normally + 1 => { + let response = executor.call(req).await?; + (response, context) + } + // Multiple incoming nodes, fan-in scenario + _ => { + let dependency_task_ids = + context.get_dependency_task_ids(&incoming_nodes); + #[cfg(feature = "tracing")] + tracing::debug!( + prev_node = ?context.prev_node, + node = ?context.current_node, + deps = ?dependency_task_ids, + "Fanning in from multiple dependencies", + ); + let results = backend + .check_status( + dependency_task_ids.values().cloned().collect::>(), + ) + .await?; + if results.iter().all(|s| matches!(s.status, Status::Done)) { + let sorted_results = { + // Match the order of incoming_nodes by matching NodeIndex + let res =incoming_nodes + .iter() + .rev() + .map(|node_index| { + let task_id = context.node_task_ids + .iter() + .find(|(n, _)| *n == node_index) + .map(|(_, task_id)| task_id) + .ok_or(BoxDynError::from("TaskId for incoming node not found"))?; + let task_result = results.iter().find(|r| &r.task_id == task_id).ok_or( + BoxDynError::from(format!( + "TaskResult for task_id {task_id:?} not found" + )))?; + Ok(task_result) + }) + .collect::, BoxDynError>>(); + match res { + Ok(v) => v, + Err(_) => return Ok(DagExecutionResponse::WaitingForDependencies { pending_dependencies: dependency_task_ids }), + } + }; + let encoded_input = B::Codec::encode( + &sorted_results + .iter() + .map(|s| match &s.result { + Ok(val) => { + let decoded: DagExecutionResponse< + B::Compact, + B::IdType, + > = B::Codec::decode(val).map_err(|e: CdcErr| { + format!( + "Failed to decode dependency result: {:?}", + e.into() + ) + })?; + match decoded { + DagExecutionResponse::FanOut { response, .. 
} => { + Ok(response) + } + DagExecutionResponse::EnqueuedNext { result } | DagExecutionResponse::Complete { result } => { + Ok(result) + } + _ => Err("Dependency task returned invalid response, which is unexpected during fan-in".to_owned()) } - Err(e) => { - return Err(format!( - "Dependency task failed: {:?}", - e - )); - } - }) - .collect::, String>>()?, - ) - .map_err(|e| e.into())?; - let req = req.map(|_| encoded_input); // Replace args with fan-in input - let response = executor.call(req).await?; - (response, context) - } else { - return Ok(DagExecutionResponse::WaitingForDependencies { - pending_dependencies: dependency_task_ids, - }); - } - } - } - } - - Err(_) => { - #[cfg(feature = "tracing")] - tracing::debug!( - "Extracting DagFlowContext for task without meta" - ); - // if no metadata, we assume its an entry task - match start_nodes.len() { - 1 => { - #[cfg(feature = "tracing")] - tracing::debug!("Single start node detected, proceeding with execution"); - let context = DagFlowContext::new(req.parts.task_id.clone()); - req.parts - .ctx - .inject(context.clone()) - .map_err(|e| e.into())?; + } + Err(e) => { + Err(format!( + "Dependency task failed: {e:?}" + )) + } + }) + .collect::, String>>()?, + ) + .map_err(|e| e.into())?; + let req = req.map(|_| encoded_input); // Replace args with fan-in input let response = executor.call(req).await?; - #[cfg(feature = "tracing")] - tracing::debug!(node = ?context.current_node, "Execution complete at node"); (response, context) - } - _ => { - let new_node_task_ids = fan_out_entry_nodes( - &executor, - &mut backend, - &mut DagFlowContext::new(req.parts.task_id.clone()), - &req.args, - ) - .await?; - return Ok(DagExecutionResponse::EntryFanOut { - node_task_ids: new_node_task_ids, + } else { + return Ok(DagExecutionResponse::WaitingForDependencies { + pending_dependencies: dependency_task_ids, }); } } } + } else { + #[cfg(feature = "tracing")] + tracing::debug!( + "Extracting DagFlowContext for task without meta" + ); + // if no metadata, we assume it's an entry task + if start_nodes.len() == 1 { + #[cfg(feature = "tracing")] + tracing::debug!("Single start node detected, proceeding with execution"); + let context = DagFlowContext::new(req.parts.task_id.clone()); + req.parts + .ctx + .inject(context.clone()) + .map_err(|e| e.into())?; + let response = executor.call(req).await?; + #[cfg(feature = "tracing")] + tracing::debug!(node = ?context.current_node, "Execution complete at node"); + (response, context) + } else { + let new_node_task_ids = fan_out_entry_nodes( + &executor, + &backend, + &DagFlowContext::new(req.parts.task_id.clone()), + &req.args, + ) + .await?; + return Ok(DagExecutionResponse::EntryFanOut { + node_task_ids: new_node_task_ids, + }); + } }; // At this point we know a node was executed and we have its context // We need to figure out the outgoing nodes and enqueue tasks for them @@ -292,8 +278,8 @@ where let next_task_ids = fan_out_next_nodes( &executor, outgoing_nodes, - &mut backend, - &mut new_context, + &backend, + &new_context, &response, ) .await?; @@ -312,8 +298,8 @@ where async fn fan_out_next_nodes( _executor: &DagExecutor, outgoing_nodes: Vec, - backend: &mut B, - context: &mut DagFlowContext, + backend: &B, + context: &DagFlowContext, input: &B::Compact, ) -> Result>, BoxDynError> where @@ -341,7 +327,7 @@ where let task = TaskBuilder::new(input.clone()) .with_task_id(task_id.clone()) .meta(DagFlowContext { - prev_node: context.prev_node.clone(), + prev_node: context.prev_node, current_node: outgoing_node, 
completed_nodes: context.completed_nodes.clone(), node_task_ids: node_task_ids.clone(), @@ -365,8 +351,8 @@ where async fn fan_out_entry_nodes( executor: &DagExecutor, - backend: &mut B, - context: &mut DagFlowContext, + backend: &B, + context: &DagFlowContext, input: &B::Compact, ) -> Result>, BoxDynError> where diff --git a/apalis-workflow/src/id_generator.rs b/apalis-workflow/src/id_generator.rs index cdbaf8e0..19ae28cd 100644 --- a/apalis-workflow/src/id_generator.rs +++ b/apalis-workflow/src/id_generator.rs @@ -9,7 +9,7 @@ pub trait GenerateId { #[cfg(feature = "uuid")] impl GenerateId for uuid::Uuid { fn generate() -> Self { - uuid::Uuid::new_v4() + Self::new_v4() } } @@ -29,26 +29,26 @@ impl GenerateId for RandomId { #[cfg(feature = "rand")] impl GenerateId for u64 { fn generate() -> Self { - rand::random::() + rand::random::() } } #[cfg(feature = "rand")] impl GenerateId for i64 { fn generate() -> Self { - rand::random::() + rand::random::() } } #[cfg(feature = "rand")] impl GenerateId for u128 { fn generate() -> Self { - rand::random::() + rand::random::() } } #[cfg(feature = "rand")] impl GenerateId for i128 { fn generate() -> Self { - rand::random::() + rand::random::() } } diff --git a/apalis-workflow/src/sequential/delay/mod.rs b/apalis-workflow/src/sequential/delay/mod.rs index e17085fb..0a05ec9c 100644 --- a/apalis-workflow/src/sequential/delay/mod.rs +++ b/apalis-workflow/src/sequential/delay/mod.rs @@ -11,8 +11,8 @@ use tower::Service; use crate::{ SteppedService, Workflow, - sequential::context::{StepContext, WorkflowContext}, id_generator::GenerateId, + sequential::context::{StepContext, WorkflowContext}, sequential::router::{GoTo, StepResult, WorkflowRouter}, sequential::step::{Layer, Stack, Step}, }; @@ -109,7 +109,7 @@ pub struct DelayWithStep { impl Clone for DelayWithStep { fn clone(&self) -> Self { - DelayWithStep { + Self { f: self.f.clone(), inner: self.inner.clone(), _marker: std::marker::PhantomData, diff --git a/apalis-workflow/src/sequential/filter_map/mod.rs b/apalis-workflow/src/sequential/filter_map/mod.rs index f23aec2e..d6b8d104 100644 --- a/apalis-workflow/src/sequential/filter_map/mod.rs +++ b/apalis-workflow/src/sequential/filter_map/mod.rs @@ -69,7 +69,7 @@ pub struct FilterService { impl Clone for FilterService { fn clone(&self) -> Self { - FilterService { + Self { service: self.service.clone(), _marker: PhantomData, } diff --git a/apalis-workflow/src/sequential/fold/mod.rs b/apalis-workflow/src/sequential/fold/mod.rs index c13ee1da..bedea332 100644 --- a/apalis-workflow/src/sequential/fold/mod.rs +++ b/apalis-workflow/src/sequential/fold/mod.rs @@ -12,8 +12,8 @@ use tower::Service; use crate::{ SteppedService, - sequential::context::{StepContext, WorkflowContext}, id_generator::GenerateId, + sequential::context::{StepContext, WorkflowContext}, sequential::router::{GoTo, StepResult, WorkflowRouter}, sequential::step::{Layer, Stack, Step}, sequential::workflow::Workflow, diff --git a/apalis-workflow/src/sequential/repeat_until/mod.rs b/apalis-workflow/src/sequential/repeat_until/mod.rs index 3d8d221b..efaf289a 100644 --- a/apalis-workflow/src/sequential/repeat_until/mod.rs +++ b/apalis-workflow/src/sequential/repeat_until/mod.rs @@ -135,7 +135,7 @@ where ); let mut repeater = self.repeater.clone(); - let fut = async move { + (async move { let mut compact = None; let decoded: Input = B::Codec::decode(&task.args)?; let prev_task_id = task.parts.task_id.clone(); @@ -190,9 +190,7 @@ where } }) } - .boxed(); - - fut + .boxed()) as _ } } @@ -224,13 +222,13 
@@ impl RepeaterState { } } -impl> + Sync, IdType: Sync> - FromRequest> for RepeaterState +impl + Sync, IdType: Sync> FromRequest> + for RepeaterState { type Error = Infallible; async fn from_request(task: &Task) -> Result { - let state: RepeaterState = task.parts.ctx.extract().unwrap_or_default(); - Ok(RepeaterState { + let state: Self = task.parts.ctx.extract().unwrap_or_default(); + Ok(Self { iterations: state.iterations, prev_task_id: state.prev_task_id, }) diff --git a/apalis-workflow/src/sink.rs b/apalis-workflow/src/sink.rs index fb342ded..69d2b790 100644 --- a/apalis-workflow/src/sink.rs +++ b/apalis-workflow/src/sink.rs @@ -6,7 +6,7 @@ use apalis_core::{ use futures::Sink; use petgraph::graph::NodeIndex; -use crate::{sequential::WorkflowContext, dag::DagFlowContext, id_generator::GenerateId}; +use crate::{dag::DagFlowContext, id_generator::GenerateId, sequential::WorkflowContext}; /// Extension trait for pushing tasks into a workflow pub trait WorkflowSink: BackendExt diff --git a/utils/apalis-file-storage/src/util.rs b/utils/apalis-file-storage/src/util.rs index 26f5e6bd..42076433 100644 --- a/utils/apalis-file-storage/src/util.rs +++ b/utils/apalis-file-storage/src/util.rs @@ -191,7 +191,7 @@ where results.push(apalis_core::backend::TaskResult { task_id: task_id.clone(), status: Status::Pending, - result: Err(format!("Task still pending")), + result: Err("Task not completed yet".to_owned()), }); continue; } From fa21577521d0f3ca6feaf7333cc5428181922fcf Mon Sep 17 00:00:00 2001 From: geofmureithi Date: Tue, 23 Dec 2025 16:05:13 +0300 Subject: [PATCH 12/12] chore: minor changes to get things working --- Cargo.lock | 4 ++-- apalis-workflow/src/dag/mod.rs | 1 - apalis-workflow/src/sequential/filter_map/mod.rs | 1 + supply-chain/config.toml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 98888ec4..bd474341 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2053,9 +2053,9 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ "bitflags 2.10.0", "errno", diff --git a/apalis-workflow/src/dag/mod.rs b/apalis-workflow/src/dag/mod.rs index 264e4369..86490f3a 100644 --- a/apalis-workflow/src/dag/mod.rs +++ b/apalis-workflow/src/dag/mod.rs @@ -82,7 +82,6 @@ where /// Add a node to the DAG #[must_use] - #[allow(clippy::todo)] pub fn add_node( &self, name: &str, diff --git a/apalis-workflow/src/sequential/filter_map/mod.rs b/apalis-workflow/src/sequential/filter_map/mod.rs index d6b8d104..abe1f890 100644 --- a/apalis-workflow/src/sequential/filter_map/mod.rs +++ b/apalis-workflow/src/sequential/filter_map/mod.rs @@ -151,6 +151,7 @@ where let main_args: Vec = vec![]; let steps: Task = request.try_map(|arg| B::Codec::decode(&arg))?; let steps = steps.args.into_iter().collect::>(); + #[cfg(feature = "tracing")] tracing::debug!(step_count = ?steps.len(), "Enqueuing FilterMap steps"); let mut task_ids = Vec::new(); for step in steps { diff --git a/supply-chain/config.toml b/supply-chain/config.toml index 902f1037..fd8b3d64 100644 --- a/supply-chain/config.toml +++ b/supply-chain/config.toml @@ -769,7 +769,7 @@ version = "0.4.1" criteria = "safe-to-deploy" [[exemptions.rustix]] -version = "1.1.2" +version = "1.1.3" criteria = "safe-to-deploy" [[exemptions.rustls]]
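Taken together, this series settles the DAG fan-in contract: a node with several incoming edges runs only once `check_status` reports every dependency as `Status::Done`; the dependency results are then reordered to match the declared incoming-node order and encoded as a Vec that replaces the task's args. A sketch of the resulting user-facing shape, modelled on the `vec_collect` test and the dag-workflow example; `fetch_len`, `fetch_double`, `combine`, and the `u32` seed type are invented, and the builder calls follow the tests rather than any published API:

use apalis_core::task_fn::task_fn;
use apalis_core::worker::context::WorkerContext;
use apalis_workflow::DagFlow;

async fn fetch_len(seed: u32) -> usize { seed as usize }
async fn fetch_double(seed: u32) -> usize { (seed * 2) as usize }

// Fan-in target: receives its dependencies' results as one Vec, ordered
// the way the dependencies were declared in `depends_on`.
async fn combine(parts: Vec<usize>, _wrk: WorkerContext) -> usize {
    parts.iter().sum()
}

fn build_dag() {
    let dag = DagFlow::new("fan-in-demo"); // a name is now required
    let a = dag.add_node("fetch_len", task_fn(fetch_len));
    let b = dag.add_node("fetch_double", task_fn(fetch_double));
    let c = dag.add_node("combine", task_fn(combine));
    let _collector = dag.node(c).depends_on((&a, &b));
}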