Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .changepacks/changepack_log_mTA6OO1hfY3fTj98u37V0.json
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{"changes":{"crates/vespertide-loader/Cargo.toml":"Patch","crates/vespertide-macro/Cargo.toml":"Patch","crates/vespertide-query/Cargo.toml":"Patch","crates/vespertide-core/Cargo.toml":"Patch","crates/vespertide-planner/Cargo.toml":"Patch","crates/vespertide/Cargo.toml":"Patch","crates/vespertide-cli/Cargo.toml":"Patch","crates/vespertide-exporter/Cargo.toml":"Patch","crates/vespertide-config/Cargo.toml":"Patch"},"note":"Fix ordering on diff, sql commands, Support enum on sqlite","date":"2025-12-18T06:48:29.591923100Z"}
78 changes: 47 additions & 31 deletions crates/vespertide-planner/src/diff.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::{BTreeMap, BTreeSet, HashSet, VecDeque};

use vespertide_core::{MigrationAction, MigrationPlan, TableConstraint, TableDef};

Expand All @@ -16,7 +16,8 @@ fn topological_sort_tables<'a>(tables: &[&'a TableDef]) -> Result<Vec<&'a TableD
let table_names: HashSet<&str> = tables.iter().map(|t| t.name.as_str()).collect();

// Build adjacency list: for each table, list the tables it depends on (via FK)
let mut dependencies: HashMap<&str, Vec<&str>> = HashMap::new();
// Use BTreeMap for consistent ordering
let mut dependencies: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
for table in tables {
let mut deps = Vec::new();
for constraint in &table.constraints {
Expand All @@ -32,7 +33,8 @@ fn topological_sort_tables<'a>(tables: &[&'a TableDef]) -> Result<Vec<&'a TableD

// Kahn's algorithm for topological sort
// Calculate in-degrees (number of tables that depend on each table)
let mut in_degree: HashMap<&str, usize> = HashMap::new();
// Use BTreeMap for consistent ordering
let mut in_degree: BTreeMap<&str, usize> = BTreeMap::new();
for table in tables {
in_degree.entry(table.name.as_str()).or_insert(0);
}
Expand All @@ -49,33 +51,38 @@ fn topological_sort_tables<'a>(tables: &[&'a TableDef]) -> Result<Vec<&'a TableD
}

// Start with tables that have no dependencies
let mut queue: VecDeque<&str> = VecDeque::new();
for table in tables {
if in_degree.get(table.name.as_str()) == Some(&0) {
queue.push_back(table.name.as_str());
}
}
// BTreeMap iteration is already sorted by key
let mut queue: VecDeque<&str> = in_degree
.iter()
.filter(|(_, deg)| **deg == 0)
.map(|(name, _)| *name)
.collect();

let mut result: Vec<&TableDef> = Vec::new();
let table_map: HashMap<&str, &TableDef> =
let table_map: BTreeMap<&str, &TableDef> =
tables.iter().map(|t| (t.name.as_str(), *t)).collect();

while let Some(table_name) = queue.pop_front() {
if let Some(&table) = table_map.get(table_name) {
result.push(table);
}

// For each table that depends on this one, decrement its in-degree
// Collect tables that become ready (in-degree becomes 0)
// Use BTreeSet for consistent ordering
let mut ready_tables: BTreeSet<&str> = BTreeSet::new();
for (dependent, deps) in &dependencies {
if deps.contains(&table_name)
&& let Some(degree) = in_degree.get_mut(dependent)
{
*degree -= 1;
if *degree == 0 {
queue.push_back(dependent);
ready_tables.insert(dependent);
}
}
}
for t in ready_tables {
queue.push_back(t);
}
}

// Check for cycles
Expand Down Expand Up @@ -105,7 +112,7 @@ fn extract_delete_table_name(action: &MigrationAction) -> &str {
}
}

fn sort_delete_tables(actions: &mut [MigrationAction], all_tables: &HashMap<&str, &TableDef>) {
fn sort_delete_tables(actions: &mut [MigrationAction], all_tables: &BTreeMap<&str, &TableDef>) {
// Collect DeleteTable actions and their indices
let delete_indices: Vec<usize> = actions
.iter()
Expand All @@ -124,14 +131,16 @@ fn sort_delete_tables(actions: &mut [MigrationAction], all_tables: &HashMap<&str
}

// Extract table names being deleted
let delete_table_names: HashSet<&str> = delete_indices
// Use BTreeSet for consistent ordering
let delete_table_names: BTreeSet<&str> = delete_indices
.iter()
.map(|&i| extract_delete_table_name(&actions[i]))
.collect();

// Build dependency graph for tables being deleted
// dependencies[A] = [B] means A has FK referencing B
let mut dependencies: HashMap<&str, Vec<&str>> = HashMap::new();
// Use BTreeMap for consistent ordering
let mut dependencies: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
for &table_name in &delete_table_names {
let mut deps = Vec::new();
if let Some(table_def) = all_tables.get(table_name) {
Expand All @@ -149,7 +158,8 @@ fn sort_delete_tables(actions: &mut [MigrationAction], all_tables: &HashMap<&str

// Use Kahn's algorithm for topological sort
// in_degree[A] = number of tables A depends on
let mut in_degree: HashMap<&str, usize> = HashMap::new();
// Use BTreeMap for consistent ordering
let mut in_degree: BTreeMap<&str, usize> = BTreeMap::new();
for &table_name in &delete_table_names {
in_degree.insert(
table_name,
Expand All @@ -158,28 +168,33 @@ fn sort_delete_tables(actions: &mut [MigrationAction], all_tables: &HashMap<&str
}

// Start with tables that have no dependencies (can be deleted last in creation order)
let mut queue: VecDeque<&str> = VecDeque::new();
for &table_name in &delete_table_names {
if in_degree.get(table_name) == Some(&0) {
queue.push_back(table_name);
}
}
// BTreeMap iteration is already sorted
let mut queue: VecDeque<&str> = in_degree
.iter()
.filter(|(_, deg)| **deg == 0)
.map(|(name, _)| *name)
.collect();

let mut sorted_tables: Vec<&str> = Vec::new();
while let Some(table_name) = queue.pop_front() {
sorted_tables.push(table_name);

// For each table that has this one as a dependency, decrement its in-degree
// Use BTreeSet for consistent ordering of newly ready tables
let mut ready_tables: BTreeSet<&str> = BTreeSet::new();
for (&dependent, deps) in &dependencies {
if deps.contains(&table_name)
&& let Some(degree) = in_degree.get_mut(dependent)
{
*degree -= 1;
if *degree == 0 {
queue.push_back(dependent);
ready_tables.insert(dependent);
}
}
}
for t in ready_tables {
queue.push_back(t);
}
}

// Reverse to get deletion order (tables with dependencies should be deleted first)
Expand Down Expand Up @@ -234,11 +249,12 @@ pub fn diff_schemas(from: &[TableDef], to: &[TableDef]) -> Result<MigrationPlan,
})
.collect::<Result<Vec<_>, _>>()?;

let from_map: HashMap<_, _> = from_normalized
// Use BTreeMap for consistent ordering
let from_map: BTreeMap<_, _> = from_normalized
.iter()
.map(|t| (t.name.as_str(), t))
.collect();
let to_map: HashMap<_, _> = to_normalized.iter().map(|t| (t.name.as_str(), t)).collect();
let to_map: BTreeMap<_, _> = to_normalized.iter().map(|t| (t.name.as_str(), t)).collect();

// Drop tables that disappeared.
for name in from_map.keys() {
Expand All @@ -252,13 +268,13 @@ pub fn diff_schemas(from: &[TableDef], to: &[TableDef]) -> Result<MigrationPlan,
// Update existing tables and their indexes/columns.
for (name, to_tbl) in &to_map {
if let Some(from_tbl) = from_map.get(name) {
// Columns
let from_cols: HashMap<_, _> = from_tbl
// Columns - use BTreeMap for consistent ordering
let from_cols: BTreeMap<_, _> = from_tbl
.columns
.iter()
.map(|c| (c.name.as_str(), c))
.collect();
let to_cols: HashMap<_, _> = to_tbl
let to_cols: BTreeMap<_, _> = to_tbl
.columns
.iter()
.map(|c| (c.name.as_str(), c))
Expand Down Expand Up @@ -300,13 +316,13 @@ pub fn diff_schemas(from: &[TableDef], to: &[TableDef]) -> Result<MigrationPlan,
}
}

// Indexes
let from_indexes: HashMap<_, _> = from_tbl
// Indexes - use BTreeMap for consistent ordering
let from_indexes: BTreeMap<_, _> = from_tbl
.indexes
.iter()
.map(|i| (i.name.as_str(), i))
.collect();
let to_indexes: HashMap<_, _> = to_tbl
let to_indexes: BTreeMap<_, _> = to_tbl
.indexes
.iter()
.map(|i| (i.name.as_str(), i))
Expand Down
43 changes: 38 additions & 5 deletions crates/vespertide-query/src/sql/add_column.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,12 @@ use sea_query::{Alias, Expr, Query, Table, TableAlterStatement};
use vespertide_core::{ColumnDef, TableDef};

use super::create_table::build_create_table_for_backend;
use super::helpers::{build_create_enum_type_sql, build_sea_column_def};
use super::helpers::{
build_create_enum_type_sql, build_schema_statement, build_sea_column_def,
collect_sqlite_enum_check_clauses,
};
use super::rename_table::build_rename_table;
use super::types::{BuiltQuery, DatabaseBackend};
use super::types::{BuiltQuery, DatabaseBackend, RawSql};
use crate::error::QueryError;

fn build_add_column_alter_for_backend(
Expand All @@ -20,15 +23,27 @@ fn build_add_column_alter_for_backend(
.to_owned()
}

/// Check if the column type is an enum
fn is_enum_column(column: &ColumnDef) -> bool {
matches!(
column.r#type,
vespertide_core::ColumnType::Complex(vespertide_core::ComplexColumnType::Enum { .. })
)
}

pub fn build_add_column(
backend: &DatabaseBackend,
table: &str,
column: &ColumnDef,
fill_with: Option<&str>,
current_schema: &[TableDef],
) -> Result<Vec<BuiltQuery>, QueryError> {
// SQLite: only NOT NULL additions require table recreation
if *backend == DatabaseBackend::Sqlite && !column.nullable {
// SQLite: NOT NULL additions or enum columns require table recreation
// (enum columns need CHECK constraint which requires table recreation in SQLite)
let sqlite_needs_recreation =
*backend == DatabaseBackend::Sqlite && (!column.nullable || is_enum_column(column));

if sqlite_needs_recreation {
let table_def = current_schema
.iter()
.find(|t| t.name == table)
Expand All @@ -47,7 +62,25 @@ pub fn build_add_column(
&new_columns,
&table_def.constraints,
);
let create_query = BuiltQuery::CreateTable(Box::new(create_temp));

// For SQLite, add CHECK constraints for enum columns
// Use original table name for constraint naming (table will be renamed back)
let enum_check_clauses = collect_sqlite_enum_check_clauses(table, &new_columns);
let create_query = if !enum_check_clauses.is_empty() {
let base_sql = build_schema_statement(&create_temp, *backend);
let mut modified_sql = base_sql;
if let Some(pos) = modified_sql.rfind(')') {
let check_sql = enum_check_clauses.join(", ");
modified_sql.insert_str(pos, &format!(", {}", check_sql));
}
BuiltQuery::Raw(RawSql::per_backend(
modified_sql.clone(),
modified_sql.clone(),
modified_sql,
))
} else {
BuiltQuery::CreateTable(Box::new(create_temp))
};

// Copy existing data, filling new column
let mut select_query = Query::select();
Expand Down
31 changes: 28 additions & 3 deletions crates/vespertide-query/src/sql/create_table.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,11 @@ use sea_query::{Alias, ForeignKey, Index, Table, TableCreateStatement};

use vespertide_core::{ColumnDef, ColumnType, ComplexColumnType, TableConstraint};

use super::helpers::{build_create_enum_type_sql, build_sea_column_def, to_sea_fk_action};
use super::types::{BuiltQuery, DatabaseBackend};
use super::helpers::{
build_create_enum_type_sql, build_schema_statement, build_sea_column_def,
collect_sqlite_enum_check_clauses, to_sea_fk_action,
};
use super::types::{BuiltQuery, DatabaseBackend, RawSql};
use crate::error::QueryError;

pub(crate) fn build_create_table_for_backend(
Expand Down Expand Up @@ -145,7 +148,29 @@ pub fn build_create_table(
table_constraints.iter().cloned().cloned().collect();
build_create_table_for_backend(backend, table, columns, &table_constraints_owned)
};
queries.push(BuiltQuery::CreateTable(Box::new(create_table_stmt)));

// For SQLite, add CHECK constraints for enum columns
if matches!(backend, DatabaseBackend::Sqlite) {
let enum_check_clauses = collect_sqlite_enum_check_clauses(table, columns);
if !enum_check_clauses.is_empty() {
// Embed CHECK constraints into CREATE TABLE statement
let base_sql = build_schema_statement(&create_table_stmt, *backend);
let mut modified_sql = base_sql;
if let Some(pos) = modified_sql.rfind(')') {
let check_sql = enum_check_clauses.join(", ");
modified_sql.insert_str(pos, &format!(", {}", check_sql));
}
queries.push(BuiltQuery::Raw(RawSql::per_backend(
modified_sql.clone(),
modified_sql.clone(),
modified_sql,
)));
} else {
queries.push(BuiltQuery::CreateTable(Box::new(create_table_stmt)));
}
} else {
queries.push(BuiltQuery::CreateTable(Box::new(create_table_stmt)));
}

// For Postgres and SQLite, add unique constraints as separate CREATE UNIQUE INDEX statements
if matches!(backend, DatabaseBackend::Postgres | DatabaseBackend::Sqlite) {
Expand Down
37 changes: 37 additions & 0 deletions crates/vespertide-query/src/sql/helpers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,43 @@ pub fn is_enum_type(column_type: &ColumnType) -> bool {
)
}

/// Generate CHECK constraint name for SQLite enum column
/// Format: chk_{table}_{column}
/// Build the name of the CHECK constraint that emulates an enum column on SQLite.
/// The name follows the fixed pattern `chk_{table}_{column}`.
pub fn build_sqlite_enum_check_name(table: &str, column: &str) -> String {
    // Preallocate: "chk_" + table + "_" + column.
    let mut name = String::with_capacity(5 + table.len() + column.len());
    name.push_str("chk_");
    name.push_str(table);
    name.push('_');
    name.push_str(column);
    name
}

/// Generate CHECK constraint expression for SQLite enum column
/// Returns the constraint clause like: CONSTRAINT "chk_table_col" CHECK (col IN ('val1', 'val2'))
/// Build the SQL fragment for a CHECK constraint restricting a SQLite enum
/// column to its declared values, e.g.
/// `CONSTRAINT "chk_table_col" CHECK ("col" IN ('val1', 'val2'))`.
/// Returns `None` when the column type is not an enum.
pub fn build_sqlite_enum_check_clause(
    table: &str,
    column: &str,
    column_type: &ColumnType,
) -> Option<String> {
    let ColumnType::Complex(ComplexColumnType::Enum { values, .. }) = column_type else {
        return None;
    };
    let name = build_sqlite_enum_check_name(table, column);
    // Quote each value as a SQL string literal; single quotes are doubled
    // per SQL escaping rules.
    let mut values_sql = String::new();
    for (i, value) in values.iter().enumerate() {
        if i > 0 {
            values_sql.push_str(", ");
        }
        values_sql.push('\'');
        values_sql.push_str(&value.replace('\'', "''"));
        values_sql.push('\'');
    }
    Some(format!(
        "CONSTRAINT \"{}\" CHECK (\"{}\" IN ({}))",
        name, column, values_sql
    ))
}

/// Collect all CHECK constraints for enum columns in a table (for SQLite)
/// Gather the SQLite CHECK constraint clauses for every enum-typed column
/// in `columns`; non-enum columns contribute nothing.
pub fn collect_sqlite_enum_check_clauses(table: &str, columns: &[ColumnDef]) -> Vec<String> {
    let mut clauses = Vec::new();
    for column in columns {
        if let Some(clause) = build_sqlite_enum_check_clause(table, &column.name, &column.r#type) {
            clauses.push(clause);
        }
    }
    clauses
}

/// Extract enum name from column type if it's an enum
pub fn get_enum_name(column_type: &ColumnType) -> Option<&str> {
if let ColumnType::Complex(ComplexColumnType::Enum { name, .. }) = column_type {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,7 @@
source: crates/vespertide-query/src/sql/add_column.rs
expression: sql
---
;
ALTER TABLE "users" ADD COLUMN "status" enum_text
CREATE TABLE "users_temp" ( "id" integer NOT NULL, "status" enum_text , CONSTRAINT "chk_users_status" CHECK ("status" IN ('active', 'inactive')));
INSERT INTO "users_temp" ("id", "status") SELECT "id", NULL AS "status" FROM "users";
DROP TABLE "users";
ALTER TABLE "users_temp" RENAME TO "users"
Original file line number Diff line number Diff line change
Expand Up @@ -3,4 +3,4 @@ source: crates/vespertide-query/src/sql/create_table.rs
expression: sql
---
;
CREATE TABLE "users" ( "id" integer NOT NULL, "status" enum_text NOT NULL DEFAULT 'active', PRIMARY KEY ("id") )
CREATE TABLE "users" ( "id" integer NOT NULL, "status" enum_text NOT NULL DEFAULT 'active', PRIMARY KEY ("id") , CONSTRAINT "chk_users_status" CHECK ("status" IN ('active', 'inactive', 'pending')))