3 changes: 0 additions & 3 deletions Project.toml
@@ -13,11 +13,9 @@ SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

[weakdeps]
JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb"
Reactant = "3c362404-f566-11ee-1572-e11a4b42c853"

[extensions]
DeviceSparseArraysJLArraysExt = "JLArrays"
DeviceSparseArraysReactantExt = "Reactant"

[compat]
AcceleratedKernels = "0.4"
@@ -26,6 +24,5 @@ ArrayInterface = "7"
JLArrays = "0.3"
KernelAbstractions = "0.9"
LinearAlgebra = "1"
Reactant = "0.2.164"
SparseArrays = "1"
julia = "1.10"
12 changes: 0 additions & 12 deletions ext/DeviceSparseArraysReactantExt.jl

This file was deleted.

10 changes: 1 addition & 9 deletions src/helpers.jl
@@ -1,11 +1,3 @@
#=
A method to check that an AbstractArray is of a given element type.
This is needed because we can implement new methods for different arrays (e.g., Reactant.jl)
=#
_check_type(::Type{T}, v::AbstractArray{T}) where {T} = true
_check_type(::Type{T}, v::AbstractArray) where {T} = false

_get_eltype(::AbstractArray{T}) where {T} = T

# Helper functions to call AcceleratedKernels methods
_sortperm_AK(x) = AcceleratedKernels.sortperm(x)
_cumsum_AK(x) = AcceleratedKernels.cumsum(x)
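
Reviewer note (not part of the diff): the removed `_check_type` / `_get_eltype` helpers become unnecessary because the struct signatures now constrain the buffers as `AbstractVector{Ti}` / `AbstractVector{Tv}`, so element types are deduced and enforced by dispatch. A minimal sketch of the pattern, using a hypothetical `Toy` type:

```julia
# Hypothetical `Toy` type, for illustration only — not part of this PR.
struct Toy{Tv,Ti<:Integer,IndT<:AbstractVector{Ti},ValT<:AbstractVector{Tv}}
    ind::IndT
    val::ValT

    # Like the new inner constructors, Tv/Ti are deduced from the buffers
    # (the `Integer` bound here is just for the demo).
    function Toy(
        ind::IndT,
        val::ValT,
    ) where {Tv,Ti<:Integer,IndT<:AbstractVector{Ti},ValT<:AbstractVector{Tv}}
        return new{Tv,Ti,IndT,ValT}(ind, val)
    end
end

Toy([1, 2, 3], [1.0, 2.0, 3.0])  # Toy{Float64, Int64, ...}; no runtime type check needed
Toy([1.0, 2.0], [3.0, 4.0])      # MethodError: Float64 indices violate Ti <: Integer
```
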
55 changes: 18 additions & 37 deletions src/matrix_coo/matrix_coo.jl
@@ -1,7 +1,7 @@
# DeviceSparseMatrixCOO implementation

"""
DeviceSparseMatrixCOO{Tv,Ti,RowIndT<:AbstractVector,ColIndT<:AbstractVector,NzValT<:AbstractVector} <: AbstractDeviceSparseMatrix{Tv,Ti}
DeviceSparseMatrixCOO{Tv,Ti,RowIndT<:AbstractVector{Ti},ColIndT<:AbstractVector{Ti},NzValT<:AbstractVector{Tv}} <: AbstractDeviceSparseMatrix{Tv,Ti}

Coordinate (COO) sparse matrix with generic storage vectors for row indices,
column indices, and nonzero values. Buffer types (e.g. `Vector`, GPU array
@@ -16,28 +16,29 @@ types) enable dispatch on device characteristics.
"""
struct DeviceSparseMatrixCOO{
Tv,
Ti<:Integer,
RowIndT<:AbstractVector,
ColIndT<:AbstractVector,
NzValT<:AbstractVector,
Ti,
RowIndT<:AbstractVector{Ti},
ColIndT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
} <: AbstractDeviceSparseMatrix{Tv,Ti}
m::Int
n::Int
rowind::RowIndT
colind::ColIndT
nzval::NzValT
function DeviceSparseMatrixCOO{Tv,Ti,RowIndT,ColIndT,NzValT}(

function DeviceSparseMatrixCOO(
m::Integer,
n::Integer,
rowind::RowIndT,
colind::ColIndT,
nzval::NzValT,
) where {
Tv,
Ti<:Integer,
RowIndT<:AbstractVector,
ColIndT<:AbstractVector,
NzValT<:AbstractVector,
Ti,
RowIndT<:AbstractVector{Ti},
ColIndT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
}
get_backend(rowind) == get_backend(colind) == get_backend(nzval) ||
throw(ArgumentError("All storage vectors must be on the same device/backend."))
@@ -46,39 +47,19 @@ struct DeviceSparseMatrixCOO{
n >= 0 || throw(ArgumentError("n must be non-negative"))
SparseArrays.sparse_check_Ti(m, n, Ti)

_check_type(Ti, rowind) || throw(ArgumentError("rowind must be of type $Ti"))
_check_type(Ti, colind) || throw(ArgumentError("colind must be of type $Ti"))
_check_type(Tv, nzval) || throw(ArgumentError("nzval must be of type $Tv"))

length(rowind) == length(colind) == length(nzval) ||
throw(ArgumentError("rowind, colind, and nzval must have same length"))

return new(Int(m), Int(n), rowind, colind, nzval)
return new{Tv,Ti,RowIndT,ColIndT,NzValT}(
Int(m),
Int(n),
copy(rowind),
copy(colind),
copy(nzval),
)
end
end

function DeviceSparseMatrixCOO(
m::Integer,
n::Integer,
rowind::RowIndT,
colind::ColIndT,
nzval::NzValT,
) where {
RowIndT<:AbstractVector{Ti},
ColIndT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
} where {Ti<:Integer,Tv}
Ti2 = _get_eltype(rowind)
Tv2 = _get_eltype(nzval)
DeviceSparseMatrixCOO{Tv2,Ti2,RowIndT,ColIndT,NzValT}(
m,
n,
copy(rowind),
copy(colind),
copy(nzval),
)
end

# Conversion from SparseMatrixCSC to COO
function DeviceSparseMatrixCOO(A::SparseMatrixCSC{Tv,Ti}) where {Tv,Ti}
m, n = size(A)
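
Not part of the diff, but a usage sketch of the refactored COO constructors (values are made up; the `using` line assumes the package is named `DeviceSparseArrays`, as the extension names suggest):

```julia
using DeviceSparseArrays, SparseArrays

# Direct construction: Tv and Ti are now inferred from the buffer element types.
A = DeviceSparseMatrixCOO(3, 3, [1, 2, 3], [1, 3, 2], [10.0, 20.0, 30.0])
# A :: DeviceSparseMatrixCOO{Float64,Int64,Vector{Int64},Vector{Int64},Vector{Float64}}

# Conversion from a SparseMatrixCSC via the constructor shown above.
B = DeviceSparseMatrixCOO(sparse([1, 2], [2, 3], [1.0, 2.0], 3, 3))
```
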
50 changes: 18 additions & 32 deletions src/matrix_csc/matrix_csc.jl
@@ -1,7 +1,7 @@
# DeviceSparseMatrixCSC implementation

"""
DeviceSparseMatrixCSC{Tv,Ti,ColPtrT<RowValT,NzValT} <: AbstractDeviceSparseMatrix{Tv,Ti}
DeviceSparseMatrixCSC{Tv,Ti,ColPtrT,RowValT,NzValT} <: AbstractDeviceSparseMatrix{Tv,Ti}

Compressed Sparse Column (CSC) matrix with generic storage vectors for column
pointer, row indices, and nonzero values. Buffer types (e.g. `Vector`, GPU array
@@ -16,28 +16,29 @@ types) enable dispatch on device characteristics.
"""
struct DeviceSparseMatrixCSC{
Tv,
Ti<:Integer,
ColPtrT<:AbstractVector,
RowValT<:AbstractVector,
NzValT<:AbstractVector,
Ti,
ColPtrT<:AbstractVector{Ti},
RowValT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
} <: AbstractDeviceSparseMatrix{Tv,Ti}
m::Int
n::Int
colptr::ColPtrT
rowval::RowValT
nzval::NzValT
function DeviceSparseMatrixCSC{Tv,Ti,ColPtrT,RowValT,NzValT}(

function DeviceSparseMatrixCSC(
m::Integer,
n::Integer,
colptr::ColPtrT,
rowval::RowValT,
nzval::NzValT,
) where {
Tv,
Ti<:Integer,
ColPtrT<:AbstractVector,
RowValT<:AbstractVector,
NzValT<:AbstractVector,
Ti,
ColPtrT<:AbstractVector{Ti},
RowValT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
}
get_backend(colptr) == get_backend(rowval) == get_backend(nzval) ||
throw(ArgumentError("All storage vectors must be on the same device/backend."))
@@ -47,35 +48,20 @@ struct DeviceSparseMatrixCSC{
SparseArrays.sparse_check_Ti(m, n, Ti)
# SparseArrays.sparse_check(n, colptr, rowval, nzval) # TODO: this uses scalar indexing

_check_type(Ti, colptr) || throw(ArgumentError("colptr must be of type $Ti"))
_check_type(Ti, rowval) || throw(ArgumentError("rowval must be of type $Ti"))
_check_type(Tv, nzval) || throw(ArgumentError("nzval must be of type $Tv"))

length(colptr) == n + 1 || throw(ArgumentError("colptr length must be n+1"))
length(rowval) == length(nzval) ||
throw(ArgumentError("rowval and nzval must have same length"))

return new(Int(m), Int(n), copy(colptr), copy(rowval), copy(nzval))
return new{Tv,Ti,ColPtrT,RowValT,NzValT}(
Int(m),
Int(n),
copy(colptr),
copy(rowval),
copy(nzval),
)
end
end

function DeviceSparseMatrixCSC(
m::Integer,
n::Integer,
colptr::ColPtrT,
rowval::RowValT,
nzval::NzValT,
) where {
ColPtrT<:AbstractVector{Ti},
RowValT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
} where {Ti<:Integer,Tv}
Ti2 = _get_eltype(colptr)
Tv2 = _get_eltype(nzval)
DeviceSparseMatrixCSC{Tv2,Ti2,ColPtrT,RowValT,NzValT}(m, n, colptr, rowval, nzval)
end


Adapt.adapt_structure(to, A::DeviceSparseMatrixCSC) = DeviceSparseMatrixCSC(
A.m,
A.n,
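
Likewise for CSC, a hedged sketch (not from the PR, continuing the imports of the COO sketch above) of how the new constructor validates its inputs:

```julia
# 3×2 matrix with A[1,1] = 1.0 and A[3,2] = 2.0 in CSC form.
colptr = [1, 2, 3]                 # must have length n + 1
rowval = [1, 3]
nzval  = [1.0, 2.0]
A = DeviceSparseMatrixCSC(3, 2, colptr, rowval, nzval)

# Mismatched element types no longer go through `_check_type`; they simply fail dispatch:
DeviceSparseMatrixCSC(3, 2, colptr, [1.0, 3.0], nzval)   # MethodError: Ti cannot be both Int and Float64
DeviceSparseMatrixCSC(3, 2, [1, 2], rowval, nzval)       # ArgumentError: colptr length must be n+1
```
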
49 changes: 18 additions & 31 deletions src/matrix_csr/matrix_csr.jl
@@ -1,7 +1,7 @@
# DeviceSparseMatrixCSR implementation

"""
DeviceSparseMatrixCSR{Tv,Ti,RowPtrT<:ColValT,NzValT} <: AbstractDeviceSparseMatrix{Tv,Ti}
DeviceSparseMatrixCSR{Tv,Ti,RowPtrT,ColValT,NzValT} <: AbstractDeviceSparseMatrix{Tv,Ti}

Compressed Sparse Row (CSR) matrix with generic storage vectors for row
pointer, column indices, and nonzero values. Buffer types (e.g. `Vector`, GPU array
@@ -16,28 +16,29 @@ types) enable dispatch on device characteristics.
"""
struct DeviceSparseMatrixCSR{
Tv,
Ti<:Integer,
RowPtrT<:AbstractVector,
ColValT<:AbstractVector,
NzValT<:AbstractVector,
Ti,
RowPtrT<:AbstractVector{Ti},
ColValT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
} <: AbstractDeviceSparseMatrix{Tv,Ti}
m::Int
n::Int
rowptr::RowPtrT
colval::ColValT
nzval::NzValT
function DeviceSparseMatrixCSR{Tv,Ti,RowPtrT,ColValT,NzValT}(

function DeviceSparseMatrixCSR(
m::Integer,
n::Integer,
rowptr::RowPtrT,
colval::ColValT,
nzval::NzValT,
) where {
Tv,
Ti<:Integer,
RowPtrT<:AbstractVector,
ColValT<:AbstractVector,
NzValT<:AbstractVector,
Ti,
RowPtrT<:AbstractVector{Ti},
ColValT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
}
get_backend(rowptr) == get_backend(colval) == get_backend(nzval) ||
throw(ArgumentError("All storage vectors must be on the same device/backend."))
@@ -47,34 +48,20 @@ struct DeviceSparseMatrixCSR{
SparseArrays.sparse_check_Ti(m, n, Ti)
# SparseArrays.sparse_check(m, rowptr, colval, nzval) # TODO: this uses scalar indexing

_check_type(Ti, rowptr) || throw(ArgumentError("rowptr must be of type $Ti"))
_check_type(Ti, colval) || throw(ArgumentError("colval must be of type $Ti"))
_check_type(Tv, nzval) || throw(ArgumentError("nzval must be of type $Tv"))

length(rowptr) == m + 1 || throw(ArgumentError("rowptr length must be m+1"))
length(colval) == length(nzval) ||
throw(ArgumentError("colval and nzval must have same length"))

return new(Int(m), Int(n), copy(rowptr), copy(colval), copy(nzval))
return new{Tv,Ti,RowPtrT,ColValT,NzValT}(
Int(m),
Int(n),
copy(rowptr),
copy(colval),
copy(nzval),
)
end
end

function DeviceSparseMatrixCSR(
m::Integer,
n::Integer,
rowptr::RowPtrT,
colval::ColValT,
nzval::NzValT,
) where {
RowPtrT<:AbstractVector{Ti},
ColValT<:AbstractVector{Ti},
NzValT<:AbstractVector{Tv},
} where {Ti<:Integer,Tv}
Ti2 = _get_eltype(rowptr)
Tv2 = _get_eltype(nzval)
DeviceSparseMatrixCSR{Tv2,Ti2,RowPtrT,ColValT,NzValT}(m, n, rowptr, colval, nzval)
end

Adapt.adapt_structure(to, A::DeviceSparseMatrixCSR) = DeviceSparseMatrixCSR(
A.m,
A.n,
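
And a device-buffer sketch for CSR (not from the PR; it assumes, as the Project.toml weakdep suggests, that the JLArrays extension lets `get_backend` agree across `JLArray` buffers, and reuses the `using DeviceSparseArrays` assumption from above):

```julia
using JLArrays   # CPU-backed reference GPU array type used by the package's extension

# The same 3×2 matrix as in the CSC sketch, now row-compressed and device-backed.
rowptr = JLArray([1, 2, 2, 3])   # must have length m + 1
colval = JLArray([1, 2])
nzval  = JLArray([1.0, 2.0])
A = DeviceSparseMatrixCSR(3, 2, rowptr, colval, nzval)
# All three buffers report the same backend, so the constructor's get_backend check passes.
```
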
23 changes: 6 additions & 17 deletions src/vector/vector.jl
@@ -13,16 +13,13 @@ on different devices. The logical length is stored along with index/value buffer

Constructors validate that the index and value vectors have matching length.
"""
struct DeviceSparseVector{
Tv,
Ti<:Integer,
IndT<:AbstractVector{Ti},
ValT<:AbstractVector{Tv},
} <: AbstractDeviceSparseVector{Tv,Ti}
struct DeviceSparseVector{Tv,Ti,IndT<:AbstractVector{Ti},ValT<:AbstractVector{Tv}} <:
AbstractDeviceSparseVector{Tv,Ti}
n::Int
nzind::IndT
nzval::ValT
function DeviceSparseVector{Tv,Ti,IndT,ValT}(

function DeviceSparseVector(
n::Integer,
nzind::IndT,
nzval::ValT,
@@ -34,17 +31,9 @@ struct DeviceSparseVector{
n >= 0 || throw(ArgumentError("The number of elements must be non-negative."))
length(nzind) == length(nzval) ||
throw(ArgumentError("index and value vectors must be the same length"))
return new(Int(n), copy(nzind), copy(nzval))
end
end

# Param inference constructor
function DeviceSparseVector(
n::Integer,
nzind::IndT,
nzval::ValT,
) where {IndT<:AbstractVector{Ti},ValT<:AbstractVector{Tv}} where {Ti<:Integer,Tv}
DeviceSparseVector{Tv,Ti,IndT,ValT}(n, nzind, nzval)
return new{Tv,Ti,IndT,ValT}(Int(n), copy(nzind), copy(nzval))
end
end

# Conversions
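
Finally, a short sketch (not from the PR, same imports as the sketches above) of the simplified `DeviceSparseVector` constructor:

```julia
# Sparse vector of logical length 10 with nonzeros at positions 2 and 7;
# Tv/Ti and the storage types are inferred from the buffers.
v = DeviceSparseVector(10, [2, 7], [0.5, 1.5])
# v :: DeviceSparseVector{Float64,Int64,Vector{Int64},Vector{Float64}}

# The inner-constructor validation is unchanged:
DeviceSparseVector(10, [2, 7], [0.5])   # ArgumentError: index and value vectors must be the same length
```
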