91 changes: 82 additions & 9 deletions Cargo.lock

Some generated files are not rendered by default.

11 changes: 6 additions & 5 deletions Cargo.toml
@@ -11,30 +11,31 @@ keywords = ["apalis", "background-jobs", "task-queue", "sqlite", "async"]
categories = ["asynchronous", "database"]

[features]
default = ["tokio-comp", "migrate", "json"]
default = ["tokio-comp", "migrate", "json", "chrono"]
migrate = ["sqlx/migrate", "sqlx/macros"]
async-std-comp = ["async-std", "sqlx/runtime-async-std-rustls"]
async-std-comp-native-tls = ["async-std", "sqlx/runtime-async-std-native-tls"]
tokio-comp = ["tokio", "sqlx/runtime-tokio-rustls"]
tokio-comp-native-tls = ["tokio", "sqlx/runtime-tokio-native-tls"]
json = ["apalis-codec/json", "sqlx/json"]
chrono = ["apalis-sql/chrono", "sqlx/chrono"]
time = ["apalis-sql/time", "sqlx/time"]

[dependencies.sqlx]
version = "0.8.6"
default-features = false
features = ["chrono", "sqlite"]
features = ["sqlite"]

[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = { version = "1" }
apalis-codec = { version = "0.1.0-rc.1", default-features = false }
-apalis-core = { version = "1.0.0-rc.1", features = ["sleep"] }
-apalis-sql = { version = "1.0.0-rc.1" }
+apalis-core = { path = "../apalis/apalis-core", version = "1.0.0-beta.2", features = ["sleep"] }
+apalis-sql = { path = "../apalis/apalis-sql", version = "1.0.0-beta.2", default-features = false }
log = "0.4.21"
futures = "0.3.30"
tokio = { version = "1", features = ["rt", "net"], optional = true }
async-std = { version = "1.13.0", optional = true }
chrono = { version = "0.4", features = ["serde"] }
thiserror = "2.0.0"
pin-project = "1.1.10"
ulid = { version = "1", features = ["serde"] }
2 changes: 1 addition & 1 deletion src/fetcher.rs
@@ -11,7 +11,7 @@ use apalis_core::{
task::Task,
worker::context::WorkerContext,
};
-use apalis_sql::from_row::TaskRow;
+use apalis_sql::TaskRow;
use futures::{FutureExt, future::BoxFuture, stream::Stream};
use pin_project::pin_project;
use sqlx::{Pool, Sqlite, SqlitePool};
35 changes: 13 additions & 22 deletions src/from_row.rs
@@ -1,4 +1,4 @@
-use chrono::{TimeZone, Utc};
+use apalis_sql::{SqlDateTime, SqlDateTimeExt, TaskRow};

#[derive(Debug)]
pub(crate) struct SqliteTaskRow {
@@ -17,11 +17,11 @@ pub(crate) struct SqliteTaskRow {
pub(crate) metadata: Option<String>,
}

-impl TryInto<apalis_sql::from_row::TaskRow> for SqliteTaskRow {
+impl TryInto<TaskRow> for SqliteTaskRow {
type Error = sqlx::Error;

-fn try_into(self) -> Result<apalis_sql::from_row::TaskRow, Self::Error> {
-Ok(apalis_sql::from_row::TaskRow {
+fn try_into(self) -> Result<TaskRow, Self::Error> {
+Ok(TaskRow {
job: self.job,
id: self
.id
@@ -37,28 +37,19 @@ impl TryInto<apalis_sql::from_row::TaskRow> for SqliteTaskRow {
.ok_or_else(|| sqlx::Error::Protocol("Missing attempts".into()))?
as usize,
max_attempts: self.max_attempts.map(|v| v as usize),
-run_at: self.run_at.map(|ts| {
-Utc.timestamp_opt(ts, 0)
-.single()
-.ok_or_else(|| sqlx::Error::Protocol("Invalid run_at timestamp".into()))
-.unwrap()
-}),
+run_at: self
+.run_at
+.map(|ts| <SqlDateTime as SqlDateTimeExt>::from_unix_timestamp(ts)),
last_result: self
.last_result
.map(|res| serde_json::from_str(&res).unwrap_or(serde_json::Value::Null)),
-lock_at: self.lock_at.map(|ts| {
-Utc.timestamp_opt(ts, 0)
-.single()
-.ok_or_else(|| sqlx::Error::Protocol("Invalid run_at timestamp".into()))
-.unwrap()
-}),
+lock_at: self
+.lock_at
+.map(|ts| <SqlDateTime as SqlDateTimeExt>::from_unix_timestamp(ts)),
lock_by: self.lock_by,
-done_at: self.done_at.map(|ts| {
-Utc.timestamp_opt(ts, 0)
-.single()
-.ok_or_else(|| sqlx::Error::Protocol("Invalid run_at timestamp".into()))
-.unwrap()
-}),
+done_at: self
+.done_at
+.map(|ts| <SqlDateTime as SqlDateTimeExt>::from_unix_timestamp(ts)),
priority: self.priority.map(|v| v as usize),
metadata: self
.metadata
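
Aside on the change above: the old code converted raw unix timestamps with chrono's Utc.timestamp_opt(...).single() followed by unwrap(), which panics on an out-of-range value; the new code hands the conversion to apalis-sql's SqlDateTime/SqlDateTimeExt. A minimal Rust sketch of the new conversion path, assuming from_unix_timestamp takes seconds as an i64 exactly as it is called in the diff; the helper name to_sql_datetime is illustrative and not part of either crate:

use apalis_sql::{SqlDateTime, SqlDateTimeExt};

// Illustrative helper mirroring how run_at, lock_at and done_at are now mapped.
// Assumes apalis-sql (1.0.0-beta.2 as pinned above) exposes SqlDateTime and
// SqlDateTimeExt::from_unix_timestamp as used in this PR.
fn to_sql_datetime(unix_secs: Option<i64>) -> Option<SqlDateTime> {
    // No unwrap() and no direct chrono dependency in this crate any more;
    // the extension trait owns the timestamp conversion.
    unix_secs.map(|ts| <SqlDateTime as SqlDateTimeExt>::from_unix_timestamp(ts))
}
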
13 changes: 2 additions & 11 deletions src/lib.rs
@@ -5,7 +5,7 @@ use std::{fmt, marker::PhantomData};

use apalis_codec::json::JsonCodec;
use apalis_core::{
-backend::{Backend, BackendExt, TaskStream, codec::Codec, queue::Queue},
+backend::{Backend, BackendExt, TaskStream, codec::Codec},
features_table,
layers::Stack,
task::Task,
@@ -49,12 +49,11 @@ mod shared;
pub mod sink;

/// Type alias for sqlite context
-pub type SqliteContext = SqlContext<SqlitePool>;
+pub type SqliteContext = SqlContext;

/// Type alias for a task stored in sqlite backend
pub type SqliteTask<Args> = Task<Args, SqliteContext, Ulid>;
pub use apalis_sql::config::Config;
-pub use apalis_sql::ext::TaskBuilderExt;
pub use callback::{DbEvent, HookCallbackListener};
pub use shared::{SharedSqliteError, SharedSqliteStorage};

@@ -323,10 +322,6 @@ where
type Compact = CompactType;
type CompactStream = TaskStream<SqliteTask<Self::Compact>, sqlx::Error>;

-fn get_queue(&self) -> Queue {
-self.config.queue().to_owned()
-}
-
fn poll_compact(self, worker: &WorkerContext) -> Self::CompactStream {
self.poll_default(worker).boxed()
}
@@ -396,10 +391,6 @@ where
type Compact = CompactType;
type CompactStream = TaskStream<SqliteTask<Self::Compact>, sqlx::Error>;

-fn get_queue(&self) -> Queue {
-self.config.queue().to_owned()
-}
-
fn poll_compact(self, worker: &WorkerContext) -> Self::CompactStream {
self.poll_with_listener(worker).boxed()
}
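
A note on the lib.rs changes above: SqlContext is no longer generic over the pool type, the Queue import and both get_queue implementations are gone, and TaskBuilderExt is no longer re-exported from this crate. A short sketch of how the public aliases and imports could read downstream after the change; the aliases are copied from the diff, while the SqlContext import path is an assumption and the TaskBuilderExt path is taken from the removed re-export:

use apalis_core::task::Task;          // from the existing lib.rs imports
use apalis_sql::context::SqlContext;  // assumed path; the real import sits outside this hunk
use apalis_sql::ext::TaskBuilderExt;  // previously re-exported here, now imported directly
use ulid::Ulid;

// Aliases as they now read in lib.rs (taken from the diff above).
pub type SqliteContext = SqlContext;
pub type SqliteTask<Args> = Task<Args, SqliteContext, Ulid>;
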
2 changes: 1 addition & 1 deletion src/queries/fetch_by_id.rs
@@ -2,7 +2,7 @@ use apalis_core::{
backend::{BackendExt, FetchById, codec::Codec},
task::task_id::TaskId,
};
-use apalis_sql::from_row::{FromRowError, TaskRow};
+use apalis_sql::{from_row::FromRowError, TaskRow};
use ulid::Ulid;

use crate::{CompactType, SqliteContext, SqliteStorage, SqliteTask, from_row::SqliteTaskRow};
2 changes: 1 addition & 1 deletion src/queries/list_tasks.rs
@@ -2,7 +2,7 @@ use apalis_core::{
backend::{BackendExt, Filter, ListAllTasks, ListTasks, codec::Codec},
task::{Task, status::Status},
};
-use apalis_sql::from_row::{FromRowError, TaskRow};
+use apalis_sql::{from_row::FromRowError, TaskRow};
use ulid::Ulid;

use crate::{CompactType, SqliteContext, SqliteStorage, SqliteTask, from_row::SqliteTaskRow};
4 changes: 2 additions & 2 deletions src/queries/wait_for.rs
@@ -22,7 +22,7 @@ where
Self: BackendExt<IdType = Ulid, Codec = Decode, Error = sqlx::Error, Compact = CompactType>,
Result<O, String>: DeserializeOwned,
{
-type ResultStream = BoxStream<'static, Result<TaskResult<O, Ulid>, Self::Error>>;
+type ResultStream = BoxStream<'static, Result<TaskResult<O>, Self::Error>>;
fn wait_for(
&self,
task_ids: impl IntoIterator<Item = TaskId<Self::IdType>>,
@@ -78,7 +78,7 @@ where
fn check_status(
&self,
task_ids: impl IntoIterator<Item = TaskId<Self::IdType>> + Send,
-) -> impl Future<Output = Result<Vec<TaskResult<O, Ulid>>, Self::Error>> + Send {
+) -> impl Future<Output = Result<Vec<TaskResult<O>>, Self::Error>> + Send {
let pool = self.pool.clone();
let ids: Vec<String> = task_ids.into_iter().map(|id| id.to_string()).collect();
