diff --git a/src/aggregation.jl b/src/aggregation.jl
index c5e4e65..80be7d9 100644
--- a/src/aggregation.jl
+++ b/src/aggregation.jl
@@ -12,7 +12,7 @@ function smoothed_aggregation(A::TA,
         max_coarse = 10,
         diagonal_dominance = false,
         keep = false,
-        coarse_solver = Pinv, kwargs...) where {T,V,bs,TA<:SparseMatrixCSC{T,V}}
+        coarse_solver = BackslashSolver, kwargs...) where {T,V,bs,TA<:SparseMatrixCSC{T,V}}
     n = size(A, 1)
     B = isnothing(B) ? ones(T,n) : copy(B)
     @assert size(A, 1) == size(B, 1)
diff --git a/src/classical.jl b/src/classical.jl
index 3b4abc2..fde04c4 100644
--- a/src/classical.jl
+++ b/src/classical.jl
@@ -7,7 +7,7 @@ function ruge_stuben(_A::Union{TA, Symmetric{Ti, TA}, Hermitian{Ti, TA}},
         postsmoother = GaussSeidel(),
         max_levels = 10,
         max_coarse = 10,
-        coarse_solver = Pinv, kwargs...) where {Ti,Tv,bs,TA<:SparseMatrixCSC{Ti,Tv}}
+        coarse_solver = BackslashSolver, kwargs...) where {Ti,Tv,bs,TA<:SparseMatrixCSC{Ti,Tv}}
 
     # fails if near null space `B` is provided
 
diff --git a/src/multilevel.jl b/src/multilevel.jl
index 18146e0..e80fc95 100644
--- a/src/multilevel.jl
+++ b/src/multilevel.jl
@@ -53,10 +53,41 @@ end
 
 abstract type CoarseSolver end
 
+"""
+    BackslashSolver{T,F} <: CoarseSolver
+
+Coarse solver that factorizes the coarse-grid matrix once with Julia's built-in
+`factorize()` and reuses the factorization for every solve. Much more efficient than
+`Pinv`: it preserves sparsity and picks a suitable method (UMFPACK, CHOLMOD, etc.).
+"""
+struct BackslashSolver{T,F} <: CoarseSolver
+    factorization::F
+    function BackslashSolver{T}(A) where T
+        # Julia's built-in factorize automatically picks the best method
+        # (UMFPACK for sparse nonsymmetric, CHOLMOD for sparse SPD, etc.)
+        fact = factorize(A)
+        new{T,typeof(fact)}(fact)
+    end
+end
+BackslashSolver(A) = BackslashSolver{eltype(A)}(A)
+Base.show(io::IO, p::BackslashSolver) = print(io, "BackslashSolver")
+
+function (solver::BackslashSolver)(x, b)
+    # Solve each right-hand side with the stored factorization
+    for i ∈ 1:size(b, 2)
+        x[:, i] = solver.factorization \ b[:, i]
+    end
+end
+
 """
     Pinv{T} <: CoarseSolver
 
 Moore-Penrose pseudo inverse coarse solver. Calls `pinv`
+
+!!! warning "Deprecated"
+    This solver converts sparse matrices to dense and computes the full pseudoinverse,
+    which is very inefficient. Consider using `BackslashSolver` instead.
""" struct Pinv{T} <: CoarseSolver pinvA::Matrix{T} diff --git a/test/runtests.jl b/test/runtests.jl index 543084c..af12e71 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,7 +1,7 @@ using SparseArrays, DelimitedFiles, Random using Test, LinearAlgebra using IterativeSolvers, LinearSolve, AlgebraicMultigrid -import AlgebraicMultigrid: Pinv, Classical +import AlgebraicMultigrid: Pinv, BackslashSolver, Classical using JLD2 using FileIO @@ -71,7 +71,30 @@ end @testset "Coarse Solver" begin A = float.(poisson(10)) b = A * ones(10) + +# Test BackslashSolver (new default, much more efficient) +x = similar(b) +BackslashSolver(A)(x, b) +@test sum(abs2, x - ones(10)) < 1e-12 # Should be more accurate than Pinv + +# Test Pinv (legacy solver, less efficient) @test sum(abs2, Pinv(A)(similar(b), b) - ones(10)) < 1e-6 + +# Test that BackslashSolver handles multiple RHS +B = hcat(b, 2*b) +X = similar(B) +BackslashSolver(A)(X, B) +@test sum(abs2, X[:, 1] - ones(10)) < 1e-12 +@test sum(abs2, X[:, 2] - 2*ones(10)) < 1e-12 + +# Test with sparse matrices of different types +for T in [Float32, Float64] + A_typed = T.(A) + b_typed = T.(b) + x_typed = similar(b_typed) + BackslashSolver(A_typed)(x_typed, b_typed) + @test sum(abs2, x_typed - ones(T, 10)) < 1e-6 +end end @testset "Multilevel" begin