diff --git a/Project.toml b/Project.toml index f3e0981..cfa0170 100644 --- a/Project.toml +++ b/Project.toml @@ -8,6 +8,7 @@ Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" ArrayLayouts = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" BroadcastMapConversion = "4a4adec5-520f-4750-bb37-d5e66b4ddeb2" +Derive = "a07dfc7f-7d04-4eb5-84cc-a97f051f655a" Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" GradedUnitRanges = "e2de450a-8a67-46c7-b59c-01d5a3d041c5" @@ -25,11 +26,14 @@ Adapt = "4.1.1" Aqua = "0.8.9" ArrayLayouts = "1.10.4" BlockArrays = "1.2.0" +Derive = "0.3.1" Dictionaries = "0.4.3" GPUArraysCore = "0.1.0" LinearAlgebra = "1.10" MacroTools = "0.5.13" +SparseArraysBase = "0.2" SplitApplyCombine = "1.2.3" +TensorAlgebra = "0.1.0" Test = "1.10" julia = "1.10" diff --git a/README.md b/README.md index 19c582b..40fdf33 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ julia> Pkg.add("BlockSparseArrays") ````julia using BlockArrays: BlockArrays, BlockedVector, Block, blockedrange -using BlockSparseArrays: BlockSparseArray, block_stored_length +using BlockSparseArrays: BlockSparseArray, blockstoredlength using Test: @test, @test_broken function main() @@ -62,13 +62,13 @@ function main() ] b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 # Blocks with discontiguous underlying data d_blocks = randn.(nz_block_sizes) b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 # Access a block @test b[Block(1, 1)] == d_blocks[1] @@ -92,7 +92,7 @@ function main() @test b + b ≈ Array(b) + Array(b) @test b + b isa BlockSparseArray # TODO: Fix this, broken. - @test_broken block_stored_length(b + b) == 2 + @test_broken blockstoredlength(b + b) == 2 scaled_b = 2b @test scaled_b ≈ 2Array(b) diff --git a/TODO.md b/TODO.md deleted file mode 100644 index 6c4bf82..0000000 --- a/TODO.md +++ /dev/null @@ -1,68 +0,0 @@ -- Add Aqua tests. -- Turn the package extensions into actual package extensions: - - BlockSparseArraysAdaptExt - - BlockSparseArraysGradedUnitRangesExt - - BlockSparseArraysTensorAlgebraExt - -# Proposals for interfaces based on `BlockArrays.jl`, `SparseArrays`, and `BlockSparseArrays.jl` - -```julia -# BlockSparseArray interface - -# Define `eachblockindex` -eachblockindex(B::BlockArrays.AbstractBlockArray) = Iterators.product(BlockArrays.blockaxes(B)...) 
- -eachblockindex(B::BlockArrays.AbstractBlockArray, b::Block) # indices in a block - -blocksize(B::BlockArrays.AbstractBlockArray, b::Block) # size of a block -blocksize(axes, b::Block) # size of a block - -blocklength(B::BlockArrays.AbstractBlockArray, b::Block) # length of a block -blocklength(axes, b::Block) # length of a block - -# Other functions -BlockArrays.blocksize(B) # number of blocks in each dimension -BlockArrays.blocksizes(B) # length of blocks in each dimension - -tuple_block(Block(2, 2)) == (Block(2), Block(2)) # Block.(b.n) -blocksize(axes, b::Block) = map(axis -> length(axis[Block(b.n)]), axes) -blocksize(B, Block(2, 2)) = size(B[Block(2, 2)]) # size of a specified block - -# SparseArrays interface - -findnz(S) # outputs nonzero keys and values (SparseArrayKit.nonzero_pairs) -nonzeros(S) # vector of structural nonzeros (SparseArrayKit.nonzero_values) -nnz(S) # number of nonzero values (SparseArrayKit.nonzero_length) -rowvals(S) # row that each nonzero value in `nonzeros(S)` is in -nzrange(S, c) # range of linear indices into `nonzeros(S)` for values in column `c` -findall(!iszero, S) # CartesianIndices of numerical nonzeros -issparse(S) -sparse(A) # convert to sparse -dropzeros!(S) -droptol!(S, tol) - -# BlockSparseArrays.jl + SparseArrays - -blockfindnz(B) # outputs nonzero block indices/keys and block views -blocknonzeros(B) -blocknnz(S) -blockfindall(!iszero, B) -isblocksparse(B) -blocksparse(A) -blockdropzeros!(B) -blockdroptol!(B, tol) - -# SparseArrayKit.jl interface - -nonzero_pairs(a) # SparseArrays.findnz -nonzero_keys(a) # SparseArrays.? -nonzero_values(a) # SparseArrays.nonzeros -nonzero_length(a) # SparseArrays.nnz - -# BlockSparseArrays.jl + SparseArrayKit.jl interface - -block_nonzero_pairs -block_nonzero_keys -block_nonzero_values -block_nonzero_length -``` diff --git a/examples/README.jl b/examples/README.jl index ec6b086..7374d88 100644 --- a/examples/README.jl +++ b/examples/README.jl @@ -40,7 +40,7 @@ julia> Pkg.add("BlockSparseArrays") # ## Examples using BlockArrays: BlockArrays, BlockedVector, Block, blockedrange -using BlockSparseArrays: BlockSparseArray, block_stored_length +using BlockSparseArrays: BlockSparseArray, blockstoredlength using Test: @test, @test_broken function main() @@ -67,13 +67,13 @@ function main() ] b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 ## Blocks with discontiguous underlying data d_blocks = randn.(nz_block_sizes) b = BlockSparseArray(nz_blocks, d_blocks, i_axes) - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 ## Access a block @test b[Block(1, 1)] == d_blocks[1] @@ -97,7 +97,7 @@ function main() @test b + b ≈ Array(b) + Array(b) @test b + b isa BlockSparseArray ## TODO: Fix this, broken. 
- @test_broken block_stored_length(b + b) == 2 + @test_broken blockstoredlength(b + b) == 2 scaled_b = 2b @test scaled_b ≈ 2Array(b) diff --git a/ext/BlockSparseArraysGradedUnitRangesExt/src/BlockSparseArraysGradedUnitRangesExt.jl b/ext/BlockSparseArraysGradedUnitRangesExt/src/BlockSparseArraysGradedUnitRangesExt.jl index e10957e..5846ce5 100644 --- a/ext/BlockSparseArraysGradedUnitRangesExt/src/BlockSparseArraysGradedUnitRangesExt.jl +++ b/ext/BlockSparseArraysGradedUnitRangesExt/src/BlockSparseArraysGradedUnitRangesExt.jl @@ -9,11 +9,14 @@ using BlockArrays: using ..BlockSparseArrays: BlockSparseArrays, AbstractBlockSparseArray, + AbstractBlockSparseArrayInterface, AbstractBlockSparseMatrix, BlockSparseArray, + BlockSparseArrayInterface, BlockSparseMatrix, BlockSparseVector, block_merge +using Derive: @interface using GradedUnitRanges: GradedUnitRanges, AbstractGradedUnitRange, @@ -109,7 +112,7 @@ end # with mixed dual and non-dual axes. This shouldn't be needed once # GradedUnitRanges is rewritten using BlockArrays v1. # TODO: Delete this once GradedUnitRanges is rewritten. -function blocksparse_show( +@interface ::AbstractBlockSparseArrayInterface function Base.show( io::IO, mime::MIME"text/plain", a::AbstractArray, axes_a::Tuple; kwargs... ) println(io, "typeof(axes) = ", typeof(axes_a), "\n") @@ -127,7 +130,7 @@ end function Base.show(io::IO, mime::MIME"text/plain", a::BlockSparseArray; kwargs...) axes_a = axes(a) a_nondual = BlockSparseArray(blocks(a), nondual.(axes(a))) - return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...) + return @interface BlockSparseArrayInterface() show(io, mime, a_nondual, axes_a; kwargs...) end # This is a temporary fix for `show` being broken for BlockSparseArrays @@ -139,7 +142,7 @@ function Base.show( ) axes_a = axes(a) a_nondual = BlockSparseArray(blocks(a'), dual.(nondual.(axes(a'))))' - return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...) + return @interface BlockSparseArrayInterface() show(io, mime, a_nondual, axes_a; kwargs...) end # This is a temporary fix for `show` being broken for BlockSparseArrays @@ -151,6 +154,6 @@ function Base.show( ) axes_a = axes(a) a_nondual = tranpose(BlockSparseArray(transpose(blocks(a)), nondual.(axes(a)))) - return blocksparse_show(io, mime, a_nondual, axes_a; kwargs...) + return @interface BlockSparseArrayInterface() show(io, mime, a_nondual, axes_a; kwargs...) 
end end diff --git a/ext/BlockSparseArraysGradedUnitRangesExt/test/Project.toml b/ext/BlockSparseArraysGradedUnitRangesExt/test/Project.toml index d1bf575..7934869 100644 --- a/ext/BlockSparseArraysGradedUnitRangesExt/test/Project.toml +++ b/ext/BlockSparseArraysGradedUnitRangesExt/test/Project.toml @@ -1,4 +1,3 @@ [deps] BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/ext/BlockSparseArraysGradedUnitRangesExt/test/runtests.jl b/ext/BlockSparseArraysGradedUnitRangesExt/test/runtests.jl index b800a81..4822d45 100644 --- a/ext/BlockSparseArraysGradedUnitRangesExt/test/runtests.jl +++ b/ext/BlockSparseArraysGradedUnitRangesExt/test/runtests.jl @@ -2,7 +2,7 @@ using Test: @test, @testset using BlockArrays: AbstractBlockArray, Block, BlockedOneTo, blockedrange, blocklengths, blocksize -using BlockSparseArrays: BlockSparseArray, block_stored_length +using BlockSparseArrays: BlockSparseArray, blockstoredlength using GradedUnitRanges: GradedUnitRanges, GradedOneTo, @@ -13,7 +13,7 @@ using GradedUnitRanges: gradedrange, isdual using LabelledNumbers: label -using SparseArraysBase: stored_length +using SparseArraysBase: storedlength using SymmetrySectors: U1 using TensorAlgebra: fusedims, splitdims using LinearAlgebra: adjoint @@ -40,8 +40,8 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @test size(b) == (4, 4, 4, 4) @test blocksize(b) == (2, 2, 2, 2) @test blocklengths.(axes(b)) == ([2, 2], [2, 2], [2, 2], [2, 2]) - @test stored_length(b) == 32 - @test block_stored_length(b) == 2 + @test storedlength(b) == 32 + @test blockstoredlength(b) == 2 for i in 1:ndims(a) @test axes(b, i) isa GradedOneTo end @@ -58,8 +58,8 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @test size(b) == (4, 4, 4, 4) @test blocksize(b) == (2, 2, 2, 2) @test blocklengths.(axes(b)) == ([2, 2], [2, 2], [2, 2], [2, 2]) - @test stored_length(b) == 256 - @test block_stored_length(b) == 16 + @test storedlength(b) == 256 + @test blockstoredlength(b) == 16 for i in 1:ndims(a) @test axes(b, i) isa BlockedOneTo{Int} end @@ -71,8 +71,8 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) b = a[2:3, 2:3, 2:3, 2:3] @test size(b) == (2, 2, 2, 2) @test blocksize(b) == (2, 2, 2, 2) - @test stored_length(b) == 2 - @test block_stored_length(b) == 2 + @test storedlength(b) == 2 + @test blockstoredlength(b) == 2 for i in 1:ndims(a) @test axes(b, i) isa GradedOneTo end @@ -156,7 +156,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedOneTo @@ -177,7 +177,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedUnitRange @@ -204,7 +204,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedUnitRangeDual @@ -229,7 +229,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_stored_length(b) == 2 + 
@test blockstoredlength(b) == 2 @test Array(b) == 2 * Array(a) for i in 1:2 @test axes(b, i) isa GradedUnitRangeDual @@ -255,7 +255,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == 2 * Array(a) @test a[:, :] isa BlockSparseArray for i in 1:2 @@ -280,7 +280,7 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) a[i] = randn(elt, size(a[i])) end b = 2 * a' - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == 2 * Array(a)' for ax in axes(b) @test ax isa typeof(dual(r)) diff --git a/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl b/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl index 3d4b145..a734ded 100644 --- a/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl +++ b/ext/BlockSparseArraysTensorAlgebraExt/src/BlockSparseArraysTensorAlgebraExt.jl @@ -1,6 +1,6 @@ module BlockSparseArraysTensorAlgebraExt using BlockArrays: AbstractBlockedUnitRange -using ..BlockSparseArrays: AbstractBlockSparseArray, block_reshape +using ..BlockSparseArrays: AbstractBlockSparseArray, blockreshape using GradedUnitRanges: tensor_product using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion @@ -13,12 +13,12 @@ TensorAlgebra.FusionStyle(::AbstractBlockedUnitRange) = BlockReshapeFusion() function TensorAlgebra.fusedims( ::BlockReshapeFusion, a::AbstractArray, axes::AbstractUnitRange... ) - return block_reshape(a, axes) + return blockreshape(a, axes) end function TensorAlgebra.splitdims( ::BlockReshapeFusion, a::AbstractArray, axes::AbstractUnitRange... ) - return block_reshape(a, axes) + return blockreshape(a, axes) end end diff --git a/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml b/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml index d1bf575..7934869 100644 --- a/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml +++ b/ext/BlockSparseArraysTensorAlgebraExt/test/Project.toml @@ -1,4 +1,3 @@ [deps] BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" diff --git a/src/BlockArraysExtensions/BlockArraysExtensions.jl b/src/BlockArraysExtensions/BlockArraysExtensions.jl index 0639dee..3208508 100644 --- a/src/BlockArraysExtensions/BlockArraysExtensions.jl +++ b/src/BlockArraysExtensions/BlockArraysExtensions.jl @@ -21,31 +21,39 @@ using BlockArrays: findblockindex using Dictionaries: Dictionary, Indices using GradedUnitRanges: blockedunitrange_getindices, to_blockindices -using SparseArraysBase: SparseArraysBase, stored_length, stored_indices +using SparseArraysBase: + SparseArraysBase, + eachstoredindex, + getunstoredindex, + isstored, + setunstoredindex!, + storedlength # A return type for `blocks(array)` when `array` isn't blocked. # Represents a vector with just that single block. struct SingleBlockView{T,N,Array<:AbstractArray{T,N}} <: AbstractArray{T,N} array::Array end +Base.parent(a::SingleBlockView) = a.array blocks_maybe_single(a) = blocks(a) blocks_maybe_single(a::Array) = SingleBlockView(a) function Base.getindex(a::SingleBlockView{<:Any,N}, index::Vararg{Int,N}) where {N} @assert all(isone, index) - return a.array + return parent(a) end # A wrapper around a potentially blocked array that is not blocked. 
struct NonBlockedArray{T,N,Array<:AbstractArray{T,N}} <: AbstractArray{T,N} array::Array end -Base.size(a::NonBlockedArray) = size(a.array) -Base.getindex(a::NonBlockedArray{<:Any,N}, I::Vararg{Integer,N}) where {N} = a.array[I...] +Base.parent(a::NonBlockedArray) = a.array +Base.size(a::NonBlockedArray) = size(parent(a)) +Base.getindex(a::NonBlockedArray{<:Any,N}, I::Vararg{Integer,N}) where {N} = parent(a)[I...] # Views of `NonBlockedArray`/`NonBlockedVector` are eager. # This fixes an issue in Julia 1.11 where reindexing defaults to using views. # TODO: Maybe reconsider this design, and allows views to work in slicing. Base.view(a::NonBlockedArray, I...) = a[I...] -BlockArrays.blocks(a::NonBlockedArray) = SingleBlockView(a.array) +BlockArrays.blocks(a::NonBlockedArray) = SingleBlockView(parent(a)) const NonBlockedVector{T,Array} = NonBlockedArray{T,1,Array} NonBlockedVector(array::AbstractVector) = NonBlockedArray(array) @@ -96,14 +104,14 @@ Base.view(S::BlockIndices, i) = S[i] # @view b[Block(1, 1)[1:2, 2:2]] # ``` # This is similar to the definition: -# blocksparse_to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}}) +# @interface BlockSparseArrayInterface() to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}}) function Base.getindex( a::NonBlockedVector{<:Integer,<:BlockIndices}, I::UnitRange{<:Integer} ) - ax = only(axes(a.array.indices)) + ax = only(axes(parent(a).indices)) brs = to_blockindices(ax, I) inds = blockedunitrange_getindices(ax, I) - return NonBlockedVector(a.array[BlockSlice(brs, inds)]) + return NonBlockedVector(parent(a)[BlockSlice(brs, inds)]) end function Base.getindex(S::BlockIndices, i::BlockSlice{<:BlockRange{1}}) @@ -256,20 +264,20 @@ function blocks_to_cartesianindices(d::Dictionary{<:Block}) return Dictionary(blocks_to_cartesianindices(eachindex(d)), d) end -function block_reshape(a::AbstractArray, dims::Tuple{Vararg{Vector{Int}}}) - return block_reshape(a, blockedrange.(dims)) +function blockreshape(a::AbstractArray, dims::Tuple{Vararg{Vector{Int}}}) + return blockreshape(a, blockedrange.(dims)) end -function block_reshape(a::AbstractArray, dims::Vararg{Vector{Int}}) - return block_reshape(a, dims) +function blockreshape(a::AbstractArray, dims::Vararg{Vector{Int}}) + return blockreshape(a, dims) end tuple_oneto(n) = ntuple(identity, n) -function block_reshape(a::AbstractArray, axes::Tuple{Vararg{AbstractUnitRange}}) +function blockreshape(a::AbstractArray, axes::Tuple{Vararg{AbstractUnitRange}}) reshaped_blocks_a = reshape(blocks(a), blocklength.(axes)) reshaped_a = similar(a, axes) - for I in stored_indices(reshaped_blocks_a) + for I in eachstoredindex(reshaped_blocks_a) block_size_I = map(i -> length(axes[i][Block(I[i])]), tuple_oneto(length(axes))) # TODO: Better converter here. 
reshaped_a[Block(Tuple(I))] = reshape(reshaped_blocks_a[I], block_size_I) @@ -277,8 +285,8 @@ function block_reshape(a::AbstractArray, axes::Tuple{Vararg{AbstractUnitRange}}) return reshaped_a end -function block_reshape(a::AbstractArray, axes::Vararg{AbstractUnitRange}) - return block_reshape(a, axes) +function blockreshape(a::AbstractArray, axes::Vararg{AbstractUnitRange}) + return blockreshape(a, axes) end function cartesianindices(axes::Tuple, b::Block) @@ -465,10 +473,6 @@ function findblocks(axis::AbstractUnitRange, range::AbstractUnitRange) return findblock(axis, first(range)):findblock(axis, last(range)) end -function block_stored_indices(a::AbstractArray) - return Block.(Tuple.(stored_indices(blocks(a)))) -end - _block(indices) = block(indices) _block(indices::CartesianIndices) = Block(ntuple(Returns(1), ndims(indices))) @@ -511,42 +515,47 @@ struct BlockView{T,N,Array<:AbstractArray{T,N}} <: AbstractArray{T,N} array::Array block::Tuple{Vararg{Block{1,Int},N}} end +Base.parent(a::BlockView) = a.array function Base.axes(a::BlockView) # TODO: Try to avoid conversion to `Base.OneTo{Int}`, or just convert # the element type to `Int` with `Int.(...)`. - # When the axes of `a.array` are `GradedOneTo`, the block is `LabelledUnitRange`, + # When the axes of `parent(a)` are `GradedOneTo`, the block is `LabelledUnitRange`, # which has element type `LabelledInteger`. That causes conversion problems # in some generic Base Julia code, for example when printing `BlockView`. return ntuple(ndims(a)) do dim - return Base.OneTo{Int}(only(axes(axes(a.array, dim)[a.block[dim]]))) + return Base.OneTo{Int}(only(axes(axes(parent(a), dim)[a.block[dim]]))) end end function Base.size(a::BlockView) return length.(axes(a)) end function Base.getindex(a::BlockView{<:Any,N}, index::Vararg{Int,N}) where {N} - return blocks(a.array)[Int.(a.block)...][index...] + return blocks(parent(a))[Int.(a.block)...][index...] end function Base.setindex!(a::BlockView{<:Any,N}, value, index::Vararg{Int,N}) where {N} - blocks(a.array)[Int.(a.block)...] = blocks(a.array)[Int.(a.block)...] - blocks(a.array)[Int.(a.block)...][index...] = value + I = Int.(a.block) + if !isstored(blocks(parent(a)), I...) + unstored_value = getunstoredindex(blocks(parent(a)), I...) + setunstoredindex!(blocks(parent(a)), unstored_value, I...) + end + blocks(parent(a))[I...][index...] = value return a end -function SparseArraysBase.stored_length(a::BlockView) +function SparseArraysBase.storedlength(a::BlockView) # TODO: Store whether or not the block is stored already as # a Bool in `BlockView`. I = CartesianIndex(Int.(a.block)) - # TODO: Use `block_stored_indices`. - if I ∈ stored_indices(blocks(a.array)) - return stored_length(blocks(a.array)[I]) + # TODO: Use `eachblockstoredindex`. + if I ∈ eachstoredindex(blocks(parent(a))) + return storedlength(blocks(parent(a))[I]) end return 0 end ## # Allow more fine-grained control: ## function ArrayLayouts.sub_materialize(layout, a::BlockView, ax) -## return blocks(a.array)[Int.(a.block)...] +## return blocks(parent(a))[Int.(a.block)...] ## end ## function ArrayLayouts.sub_materialize(layout, a::BlockView) ## return sub_materialize(layout, a, axes(a)) @@ -555,7 +564,7 @@ end ## return sub_materialize(MemoryLayout(a), a) ## end function ArrayLayouts.sub_materialize(a::BlockView) - return blocks(a.array)[Int.(a.block)...] + return blocks(parent(a))[Int.(a.block)...] 
end function view!(a::AbstractArray{<:Any,N}, index::Block{N}) where {N} @@ -580,7 +589,8 @@ function view!(a::AbstractArray{<:Any,N}, index::Vararg{BlockIndexRange{1},N}) w end using MacroTools: @capture -using SparseArraysBase: is_getindex_expr +is_getindex_expr(expr::Expr) = (expr.head === :ref) +is_getindex_expr(x) = false macro view!(expr) if !is_getindex_expr(expr) error("@view must be used with getindex syntax (as `@view! a[i,j,...]`)") diff --git a/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl b/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl index 69e740e..7d104de 100644 --- a/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl +++ b/src/BlockArraysSparseArraysBaseExt/BlockArraysSparseArraysBaseExt.jl @@ -1,11 +1,11 @@ using BlockArrays: AbstractBlockArray, BlocksView -using SparseArraysBase: SparseArraysBase, stored_length +using SparseArraysBase: SparseArraysBase, storedlength -function SparseArraysBase.stored_length(a::AbstractBlockArray) - return sum(b -> stored_length(b), blocks(a); init=zero(Int)) +function SparseArraysBase.storedlength(a::AbstractBlockArray) + return sum(b -> storedlength(b), blocks(a); init=zero(Int)) end # TODO: Handle `BlocksView` wrapping a sparse array? -function SparseArraysBase.storage_indices(a::BlocksView) +function SparseArraysBase.eachstoredindex(a::BlocksView) return CartesianIndices(a) end diff --git a/src/abstractblocksparsearray/abstractblocksparsearray.jl b/src/abstractblocksparsearray/abstractblocksparsearray.jl index 7de1a2d..c3297c1 100644 --- a/src/abstractblocksparsearray/abstractblocksparsearray.jl +++ b/src/abstractblocksparsearray/abstractblocksparsearray.jl @@ -1,10 +1,5 @@ using BlockArrays: BlockArrays, AbstractBlockArray, Block, BlockIndex, BlockedUnitRange, blocks -using SparseArraysBase: sparse_getindex, sparse_setindex! - -# TODO: Delete this. This function was replaced -# by `stored_length` but is still used in `NDTensors`. -function nonzero_keys end abstract type AbstractBlockSparseArray{T,N} <: AbstractBlockArray{T,N} end @@ -24,12 +19,12 @@ end # Specialized in order to fix ambiguity error with `BlockArrays`. function Base.getindex(a::AbstractBlockSparseArray{<:Any,N}, I::Vararg{Int,N}) where {N} - return blocksparse_getindex(a, I...) + return @interface BlockSparseArrayInterface() getindex(a, I...) end # Specialized in order to fix ambiguity error with `BlockArrays`. function Base.getindex(a::AbstractBlockSparseArray{<:Any,0}) - return blocksparse_getindex(a) + return @interface BlockSparseArrayInterface() getindex(a) end ## # Fix ambiguity error with `BlockArrays`. @@ -44,7 +39,7 @@ end ## ## # Fix ambiguity error with `BlockArrays`. ## function Base.getindex(a::AbstractBlockSparseArray, I::Vararg{AbstractVector}) -## ## return blocksparse_getindex(a, I...) +## ## return @interface BlockSparseArrayInterface() getindex(a, I...) ## return ArrayLayouts.layout_getindex(a, I...) ## end @@ -52,13 +47,13 @@ end function Base.setindex!( a::AbstractBlockSparseArray{<:Any,N}, value, I::Vararg{Int,N} ) where {N} - blocksparse_setindex!(a, value, I...) + @interface BlockSparseArrayInterface() setindex!(a, value, I...) return a end # Fix ambiguity error. 
function Base.setindex!(a::AbstractBlockSparseArray{<:Any,0}, value) - blocksparse_setindex!(a, value) + @interface BlockSparseArrayInterface() setindex!(a, value) return a end diff --git a/src/abstractblocksparsearray/cat.jl b/src/abstractblocksparsearray/cat.jl index 3023037..635be13 100644 --- a/src/abstractblocksparsearray/cat.jl +++ b/src/abstractblocksparsearray/cat.jl @@ -1,7 +1,6 @@ -# TODO: Change to `AnyAbstractBlockSparseArray`. +using Derive: @interface, interface + +# TODO: Define with `@derive`. function Base.cat(as::AnyAbstractBlockSparseArray...; dims) - # TODO: Use `sparse_cat` instead, currently - # that erroneously allocates too many blocks that are - # zero and shouldn't be stored. - return blocksparse_cat(as...; dims) + return @interface interface(as...) cat(as...; dims) end diff --git a/src/abstractblocksparsearray/map.jl b/src/abstractblocksparsearray/map.jl index 2500509..93ba526 100644 --- a/src/abstractblocksparsearray/map.jl +++ b/src/abstractblocksparsearray/map.jl @@ -1,22 +1,14 @@ using ArrayLayouts: LayoutArray using BlockArrays: blockisequal +using Derive: @interface, interface using LinearAlgebra: Adjoint, Transpose -using SparseArraysBase: - SparseArraysBase, - SparseArrayStyle, - sparse_map!, - sparse_copy!, - sparse_copyto!, - sparse_permutedims!, - sparse_mapreduce, - sparse_iszero, - sparse_isreal +using SparseArraysBase: SparseArraysBase, SparseArrayStyle # Returns `Vector{<:CartesianIndices}` function union_stored_blocked_cartesianindices(as::Vararg{AbstractArray}) combined_axes = combine_axes(axes.(as)...) stored_blocked_cartesianindices_as = map(as) do a - return blocked_cartesianindices(axes(a), combined_axes, block_stored_indices(a)) + return blocked_cartesianindices(axes(a), combined_axes, eachblockstoredindex(a)) end return ∪(stored_blocked_cartesianindices_as...) end @@ -57,13 +49,14 @@ function reblock( return @view parent(a)[map(I -> Vector(I.blocks), parentindices(a))...] end +# TODO: Move to `blocksparsearrayinterface/map.jl`. # TODO: Rewrite this so that it takes the blocking structure # made by combining the blocking of the axes (i.e. the blocking that # is used to determine `union_stored_blocked_cartesianindices(...)`). # `reblock` is a partial solution to that, but a bit ad-hoc. -# TODO: Move to `blocksparsearrayinterface/map.jl`. -function SparseArraysBase.sparse_map!( - ::BlockSparseArrayStyle, f, a_dest::AbstractArray, a_srcs::Vararg{AbstractArray} +## TODO: Make this an `@interface AbstractBlockSparseArrayInterface` function. +@interface ::AbstractBlockSparseArrayInterface function Base.map!( + f, a_dest::AbstractArray, a_srcs::AbstractArray... ) a_dest, a_srcs = reblock(a_dest), reblock.(a_srcs) for I in union_stored_blocked_cartesianindices(a_dest, a_srcs...) @@ -88,12 +81,30 @@ function SparseArraysBase.sparse_map!( return a_dest end -# TODO: Implement this. -# function SparseArraysBase.sparse_mapreduce(::BlockSparseArrayStyle, f, a_dest::AbstractArray, a_srcs::Vararg{AbstractArray}) -# end +# TODO: Move to `blocksparsearrayinterface/map.jl`. +@interface ::AbstractBlockSparseArrayInterface function Base.mapreduce( + f, op, as::AbstractArray...; kwargs... +) + # TODO: Define an `init` value based on the element type. + return @interface interface(blocks.(as)...) mapreduce( + block -> mapreduce(f, op, block), op, blocks.(as)...; kwargs... + ) +end -function Base.map!(f, a_dest::AbstractArray, a_srcs::Vararg{AnyAbstractBlockSparseArray}) - sparse_map!(f, a_dest, a_srcs...) +# TODO: Move to `blocksparsearrayinterface/map.jl`. 
+@interface ::AbstractBlockSparseArrayInterface function Base.iszero(a::AbstractArray) + # TODO: Just call `iszero(blocks(a))`? + return @interface interface(blocks(a)) iszero(blocks(a)) +end + +# TODO: Move to `blocksparsearrayinterface/map.jl`. +@interface ::AbstractBlockSparseArrayInterface function Base.isreal(a::AbstractArray) + # TODO: Just call `isreal(blocks(a))`? + return @interface interface(blocks(a)) isreal(blocks(a)) +end + +function Base.map!(f, a_dest::AbstractArray, a_srcs::AnyAbstractBlockSparseArray...) + @interface interface(a_srcs...) map!(f, a_dest, a_srcs...) return a_dest end @@ -102,50 +113,42 @@ function Base.map(f, as::Vararg{AnyAbstractBlockSparseArray}) end function Base.copy!(a_dest::AbstractArray, a_src::AnyAbstractBlockSparseArray) - sparse_copy!(a_dest, a_src) - return a_dest + return @interface interface(a_src) copy!(a_dest, a_src) end function Base.copyto!(a_dest::AbstractArray, a_src::AnyAbstractBlockSparseArray) - sparse_copyto!(a_dest, a_src) - return a_dest + return @interface interface(a_src) copyto!(a_dest, a_src) end # Fix ambiguity error function Base.copyto!(a_dest::LayoutArray, a_src::AnyAbstractBlockSparseArray) - sparse_copyto!(a_dest, a_src) - return a_dest + return @interface interface(a_src) copyto!(a_dest, a_src) end function Base.copyto!( a_dest::AbstractMatrix, a_src::Transpose{T,<:AbstractBlockSparseMatrix{T}} ) where {T} - sparse_copyto!(a_dest, a_src) - return a_dest + return @interface interface(a_src) copyto!(a_dest, a_src) end function Base.copyto!( a_dest::AbstractMatrix, a_src::Adjoint{T,<:AbstractBlockSparseMatrix{T}} ) where {T} - sparse_copyto!(a_dest, a_src) - return a_dest + return @interface interface(a_src) copyto!(a_dest, a_src) end function Base.permutedims!(a_dest, a_src::AnyAbstractBlockSparseArray, perm) - sparse_permutedims!(a_dest, a_src, perm) - return a_dest + return @interface interface(a_src) permutedims!(a_dest, a_src, perm) end -function Base.mapreduce(f, op, as::Vararg{AnyAbstractBlockSparseArray}; kwargs...) - return sparse_mapreduce(f, op, as...; kwargs...) +function Base.mapreduce(f, op, as::AnyAbstractBlockSparseArray...; kwargs...) + return @interface interface(as...) mapreduce(f, op, as...; kwargs...) end -# TODO: Why isn't this calling `mapreduce` already? function Base.iszero(a::AnyAbstractBlockSparseArray) - return sparse_iszero(blocks(a)) + return @interface interface(a) iszero(a) end -# TODO: Why isn't this calling `mapreduce` already? function Base.isreal(a::AnyAbstractBlockSparseArray) - return sparse_isreal(blocks(a)) + return @interface interface(a) isreal(a) end diff --git a/src/abstractblocksparsearray/sparsearrayinterface.jl b/src/abstractblocksparsearray/sparsearrayinterface.jl index b0b8aad..bbb008a 100644 --- a/src/abstractblocksparsearray/sparsearrayinterface.jl +++ b/src/abstractblocksparsearray/sparsearrayinterface.jl @@ -1,7 +1,9 @@ using BlockArrays: Block -using SparseArraysBase: SparseArraysBase, sparse_storage, stored_indices +using SparseArraysBase: SparseArraysBase, eachstoredindex, storedlength, storedvalues # Structure storing the block sparse storage +# TODO: Delete this in favor of `storedvalues(blocks(a))`, +# and rename `storedblocks(a)` and/or `eachstoredblock(a)`. 
struct BlockSparseStorage{Arr<:AbstractBlockSparseArray} array::Arr end @@ -11,7 +13,7 @@ function blockindex_to_cartesianindex(a::AbstractArray, blockindex) end function Base.keys(s::BlockSparseStorage) - stored_blockindices = Iterators.map(stored_indices(blocks(s.array))) do I + stored_blockindices = Iterators.map(eachstoredindex(blocks(s.array))) do I block_axes = axes(blocks(s.array)[I]) blockindices = Block(Tuple(I))[block_axes...] return Iterators.map( @@ -29,10 +31,12 @@ function Base.iterate(s::BlockSparseStorage, args...) return iterate(values(s), args...) end -function SparseArraysBase.sparse_storage(a::AbstractBlockSparseArray) - return BlockSparseStorage(a) -end +## TODO: Bring back this deifinition but check that it makes sense. +## function SparseArraysBase.storedvaluese(a::AbstractBlockSparseArray) +## return BlockSparseStorage(a) +## end -function SparseArraysBase.stored_length(a::AnyAbstractBlockSparseArray) - return sum(stored_length, sparse_storage(blocks(a)); init=zero(Int)) +# TODO: Turn this into an `@interface ::AbstractBlockSparseArrayInterface` function. +function SparseArraysBase.storedlength(a::AnyAbstractBlockSparseArray) + return sum(storedlength, storedvalues(blocks(a)); init=zero(Int)) end diff --git a/src/abstractblocksparsearray/views.jl b/src/abstractblocksparsearray/views.jl index 39872e0..9c22ab9 100644 --- a/src/abstractblocksparsearray/views.jl +++ b/src/abstractblocksparsearray/views.jl @@ -39,7 +39,7 @@ function Base.view( }, I::Block{N}, ) where {N} - return blocksparse_view(a, I) + return @interface BlockSparseArrayInterface() view(a, I) end function Base.view( a::SubArray{ @@ -47,13 +47,13 @@ function Base.view( }, I::Vararg{Block{1},N}, ) where {N} - return blocksparse_view(a, I...) + return @interface BlockSparseArrayInterface() view(a, I...) end function Base.view( V::SubArray{<:Any,1,<:AnyAbstractBlockSparseArray,<:Tuple{BlockSlice{<:BlockRange{1}}}}, I::Block{1}, ) - return blocksparse_view(a, I) + return @interface BlockSparseArrayInterface() view(a, I) end # Specialized code for getting the view of a block. @@ -63,13 +63,13 @@ function BlockArrays.viewblock( return viewblock(a, Tuple(block)...) end -# TODO: Define `blocksparse_viewblock`. +# TODO: Define `@interface BlockSparseArrayInterface() viewblock`. function BlockArrays.viewblock( a::AbstractBlockSparseArray{<:Any,N}, block::Vararg{Block{1},N} ) where {N} I = CartesianIndex(Int.(block)) - # TODO: Use `block_stored_indices`. - if I ∈ stored_indices(blocks(a)) + # TODO: Use `eachblockstoredindex`. + if I ∈ eachstoredindex(blocks(a)) return blocks(a)[I] end return BlockView(a, block) @@ -177,16 +177,16 @@ end # XXX: TODO: Distinguish if a sub-view of the block needs to be taken! # Define a new `SubBlockSlice` which is used in: -# `blocksparse_to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}})` +# `@interface BlockSparseArrayInterface() to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}})` # in `blocksparsearrayinterface/blocksparsearrayinterface.jl`. -# TODO: Define `blocksparse_viewblock`. +# TODO: Define `@interface BlockSparseArrayInterface() viewblock`. function BlockArrays.viewblock( a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockSliceCollection,N}}}, block::Vararg{Block{1},N}, ) where {T,N} I = CartesianIndex(Int.(block)) - # TODO: Use `block_stored_indices`. - if I ∈ stored_indices(blocks(a)) + # TODO: Use `eachblockstoredindex`. 
+ if I ∈ eachstoredindex(blocks(a)) return blocks(a)[I] end return BlockView(parent(a), Block.(Base.reindex(parentindices(blocks(a)), Tuple(I)))) @@ -220,7 +220,7 @@ function BlockArrays.viewblock( return @view parent(a)[brs...] end -# TODO: Define `blocksparse_viewblock`. +# TODO: Define `@interface BlockSparseArrayInterface() viewblock`. function BlockArrays.viewblock( a::SubArray{ T, @@ -257,7 +257,7 @@ function BlockArrays.viewblock( ) where {T,N} return viewblock(a, to_tuple(block)...) end -# TODO: Define `blocksparse_viewblock`. +# TODO: Define `@interface BlockSparseArrayInterface() viewblock`. function BlockArrays.viewblock( a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockedSlice,N}}}, I::Vararg{Block{1},N}, @@ -271,7 +271,7 @@ function BlockArrays.viewblock( end return @view parent(a)[brs...] end -# TODO: Define `blocksparse_viewblock`. +# TODO: Define `@interface BlockSparseArrayInterface() viewblock`. function BlockArrays.viewblock( a::SubArray{T,N,<:AbstractBlockSparseArray{T,N},<:Tuple{Vararg{BlockedSlice,N}}}, block::Vararg{BlockIndexRange{1},N}, diff --git a/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl b/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl index 20ef541..730353b 100644 --- a/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl +++ b/src/abstractblocksparsearray/wrappedabstractblocksparsearray.jl @@ -1,4 +1,5 @@ using Adapt: Adapt, WrappedArray +using ArrayLayouts: zero! using BlockArrays: BlockArrays, AbstractBlockVector, @@ -8,6 +9,7 @@ using BlockArrays: blockedrange, mortar, unblock +using Derive: Derive, @interface using SplitApplyCombine: groupcount using TypeParameterAccessors: similartype @@ -20,18 +22,20 @@ const AnyAbstractBlockSparseArray{T,N} = Union{ <:AbstractBlockSparseArray{T,N},<:WrappedAbstractBlockSparseArray{T,N} } +Derive.interface(::Type{<:AnyAbstractBlockSparseArray}) = BlockSparseArrayInterface() + # a[1:2, 1:2] function Base.to_indices( a::AnyAbstractBlockSparseArray, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}} ) - return blocksparse_to_indices(a, inds, I) + return @interface BlockSparseArrayInterface() to_indices(a, inds, I) end # a[[Block(2), Block(1)], [Block(2), Block(1)]] function Base.to_indices( a::AnyAbstractBlockSparseArray, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}} ) - return blocksparse_to_indices(a, inds, I) + return @interface BlockSparseArrayInterface() to_indices(a, inds, I) end # a[BlockVector([Block(2), Block(1)], [2]), BlockVector([Block(2), Block(1)], [2])] @@ -41,7 +45,7 @@ function Base.to_indices( inds, I::Tuple{AbstractBlockVector{<:Block{1}},Vararg{Any}}, ) - return blocksparse_to_indices(a, inds, I) + return @interface BlockSparseArrayInterface() to_indices(a, inds, I) end # a[mortar([Block(1)[1:2], Block(2)[1:3]])] @@ -50,7 +54,7 @@ function Base.to_indices( inds, I::Tuple{BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}},Vararg{Any}}, ) - return blocksparse_to_indices(a, inds, I) + return @interface BlockSparseArrayInterface() to_indices(a, inds, I) end # a[[Block(1)[1:2], Block(2)[1:2]], [Block(1)[1:2], Block(2)[1:2]]] @@ -61,14 +65,16 @@ function Base.to_indices( end # BlockArrays `AbstractBlockArray` interface -BlockArrays.blocks(a::AnyAbstractBlockSparseArray) = blocksparse_blocks(a) +function BlockArrays.blocks(a::AnyAbstractBlockSparseArray) + @interface BlockSparseArrayInterface() blocks(a) +end # Fix ambiguity error with `BlockArrays` using BlockArrays: BlockSlice function BlockArrays.blocks( 
a::SubArray{<:Any,<:Any,<:AbstractBlockSparseArray,<:Tuple{Vararg{BlockSlice}}} ) - return blocksparse_blocks(a) + return @interface BlockSparseArrayInterface() blocks(a) end using TypeParameterAccessors: parenttype @@ -101,7 +107,7 @@ function Base.getindex(a::AnyAbstractBlockSparseArray{<:Any,0}) return ArrayLayouts.layout_getindex(a) end -# TODO: Define `blocksparse_isassigned`. +# TODO: Define `@interface BlockSparseArrayInterface() isassigned`. function Base.isassigned( a::AnyAbstractBlockSparseArray{<:Any,N}, index::Vararg{Block{1},N} ) where {N} @@ -117,7 +123,7 @@ function Base.isassigned(a::AnyAbstractBlockSparseArray{<:Any,N}, index::Block{N return isassigned(a, Tuple(index)...) end -# TODO: Define `blocksparse_isassigned`. +# TODO: Define `@interface BlockSparseArrayInterface() isassigned`. function Base.isassigned( a::AnyAbstractBlockSparseArray{<:Any,N}, index::Vararg{BlockIndex{1},N} ) where {N} @@ -128,33 +134,25 @@ end function Base.setindex!( a::AnyAbstractBlockSparseArray{<:Any,N}, value, I::BlockIndex{N} ) where {N} - blocksparse_setindex!(a, value, I) + # TODO: Use `@interface interface(a) setindex!(...)`. + @interface BlockSparseArrayInterface() setindex!(a, value, I) return a end # Fixes ambiguity error with BlockArrays.jl function Base.setindex!(a::AnyAbstractBlockSparseArray{<:Any,1}, value, I::BlockIndex{1}) - blocksparse_setindex!(a, value, I) + # TODO: Use `@interface interface(a) setindex!(...)`. + @interface BlockSparseArrayInterface() setindex!(a, value, I) return a end -function Base.fill!(a::AbstractBlockSparseArray, value) - if iszero(value) - # This drops all of the blocks. - sparse_zero!(blocks(a)) - return a - end - blocksparse_fill!(a, value) - return a +# TODO: Use `@derive`. +function ArrayLayouts.zero!(a::AnyAbstractBlockSparseArray) + return @interface interface(a) zero!(a) end +# TODO: Use `@derive`. function Base.fill!(a::AnyAbstractBlockSparseArray, value) - # TODO: Even if `iszero(value)`, this doesn't drop - # blocks from `a`, and additionally allocates - # new blocks filled with zeros, unlike - # `fill!(a::AbstractBlockSparseArray, value)`. - # Consider changing that behavior when possible. - blocksparse_fill!(a, value) - return a + return @interface interface(a) fill!(a, value) end # Needed by `BlockArrays` matrix multiplication interface @@ -216,29 +214,51 @@ function blocksparse_similar(a, elt::Type, axes::Tuple) undef, axes ) end +@interface ::AbstractBlockSparseArrayInterface function Base.similar( + a::AbstractArray, elt::Type, axes::Tuple{Vararg{Int}} +) + return blocksparse_similar(a, elt, axes) +end +@interface ::AbstractBlockSparseArrayInterface function Base.similar( + a::AbstractArray, elt::Type, axes::Tuple +) + return blocksparse_similar(a, elt, axes) +end +@interface ::AbstractBlockSparseArrayInterface function Base.similar( + a::Type{<:AbstractArray}, elt::Type, axes::Tuple{Vararg{Int}} +) + return blocksparse_similar(a, elt, axes) +end +@interface ::AbstractBlockSparseArrayInterface function Base.similar( + a::Type{<:AbstractArray}, elt::Type, axes::Tuple +) + return blocksparse_similar(a, elt, axes) +end # Needed by `BlockArrays` matrix multiplication interface -# TODO: Define a `blocksparse_similar` function. +# TODO: Define a `@interface BlockSparseArrayInterface() similar` function. 
function Base.similar( arraytype::Type{<:AnyAbstractBlockSparseArray}, elt::Type, axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}, ) - return blocksparse_similar(arraytype, elt, axes) + return @interface BlockSparseArrayInterface() similar(arraytype, elt, axes) end -# TODO: Define a `blocksparse_similar` function. +# TODO: Define a `@interface BlockSparseArrayInterface() similar` function. function Base.similar( a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{Vararg{AbstractUnitRange{<:Integer}}}, ) - return blocksparse_similar(a, elt, axes) + # TODO: Use `@interface interface(a) similar(...)`. + return @interface BlockSparseArrayInterface() similar(a, elt, axes) end # Fixes ambiguity error. function Base.similar(a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{}) - return blocksparse_similar(a, elt, axes) + # TODO: Use `@interface interface(a) similar(...)`. + return @interface BlockSparseArrayInterface() similar(a, elt, axes) end # Fixes ambiguity error with `BlockArrays`. @@ -249,7 +269,8 @@ function Base.similar( AbstractBlockedUnitRange{<:Integer},Vararg{AbstractBlockedUnitRange{<:Integer}} }, ) - return blocksparse_similar(a, elt, axes) + # TODO: Use `@interface interface(a) similar(...)`. + return @interface BlockSparseArrayInterface() similar(a, elt, axes) end # Fixes ambiguity error with `OffsetArrays`. @@ -258,7 +279,8 @@ function Base.similar( elt::Type, axes::Tuple{AbstractUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, ) - return blocksparse_similar(a, elt, axes) + # TODO: Use `@interface interface(a) similar(...)`. + return @interface BlockSparseArrayInterface() similar(a, elt, axes) end # Fixes ambiguity error with `BlockArrays`. @@ -267,7 +289,8 @@ function Base.similar( elt::Type, axes::Tuple{AbstractBlockedUnitRange{<:Integer},Vararg{AbstractUnitRange{<:Integer}}}, ) - return blocksparse_similar(a, elt, axes) + # TODO: Use `@interface interface(a) similar(...)`. + return @interface BlockSparseArrayInterface() similar(a, elt, axes) end # Fixes ambiguity errors with BlockArrays. @@ -280,14 +303,16 @@ function Base.similar( Vararg{AbstractUnitRange{<:Integer}}, }, ) - return blocksparse_similar(a, elt, axes) + # TODO: Use `@interface interface(a) similar(...)`. + return @interface BlockSparseArrayInterface() similar(a, elt, axes) end # Fixes ambiguity error with `StaticArrays`. function Base.similar( a::AnyAbstractBlockSparseArray, elt::Type, axes::Tuple{Base.OneTo,Vararg{Base.OneTo}} ) - return blocksparse_similar(a, elt, axes) + # TODO: Use `@interface interface(a) similar(...)`. + return @interface BlockSparseArrayInterface() similar(a, elt, axes) end # TODO: Implement this in a more generic way using a smarter `copyto!`, diff --git a/src/backup/qr.jl b/src/backup/qr.jl deleted file mode 100644 index c480398..0000000 --- a/src/backup/qr.jl +++ /dev/null @@ -1,143 +0,0 @@ -using .SparseArraysBase: SparseArrayDOK - -# Check if the matrix has 1 or fewer entries -# per row/column. -function is_permutation_matrix(a::SparseMatrixCSC) - return all(col -> length(nzrange(a, col)) ≤ 1, axes(a, 2)) -end - -# Check if the matrix has 1 or fewer entries -# per row/column. -function is_permutation_matrix(a::SparseArrayDOK{<:Any,2}) - keys = collect(Iterators.map(Tuple, nonzero_keys(a))) - I = first.(keys) - J = last.(keys) - return allunique(I) && allunique(J) -end - -function findnonzerorows(a::SparseMatrixCSC, col) - return view(a.rowval, a.colptr[col]:(a.colptr[col + 1] - 1)) -end - -# TODO: Is this already defined? 
-function SparseArrays.SparseMatrixCSC(a::SparseArrayDOK{<:Any,2}) - # Not defined: - # a_csc = SparseMatrixCSC{eltype(a)}(size(a)) - a_csc = spzeros(eltype(a), size(a)) - for I in nonzero_keys(a) - a_csc[I] = a[I] - end - return a_csc -end - -# TODO: Is this already defined? -# Get the sparse structure of a SparseArray as a SparseMatrixCSC. -function sparse_structure( - structure_type::Type{<:SparseMatrixCSC}, a::SparseArrayDOK{<:Any,2} -) - # Idealy would work but a bit too complicated for `map` right now: - # return SparseMatrixCSC(map(x -> iszero(x) ? false : true, a)) - # TODO: Change to `spzeros(Bool, size(a))`. - a_structure = structure_type(spzeros(Bool, size(a)...)) - for I in nonzero_keys(a) - i, j = Tuple(I) - a_structure[i, j] = true - end - return a_structure -end - -# Get the sparsity structure as a `SparseMatrixCSC` with values -# of `true` where there are structural nonzero blocks and `false` -# otherwise. -function block_sparse_structure(structure_type::Type, a::BlockSparseArray{<:Any,2}) - return sparse_structure(structure_type, blocks(a)) -end - -function is_block_permutation_matrix(a::BlockSparseArray{<:Any,2}) - return is_permutation_matrix(blocks(a)) -end - -qr_rank(alg::Algorithm"thin", a::AbstractArray{<:Any,2}) = minimum(size(a)) - -# m × n → (m × min(m, n)) ⋅ (min(m, n) × n) -function qr_block_sparse_structure(alg::Algorithm"thin", a::BlockSparseArray{<:Any,2}) - axes_row, axes_col = axes(a) - a_csc = block_sparse_structure(SparseMatrixCSC, a) - F = qr(float(a_csc)) - # Outputs full Q - # q_csc = sparse(F.Q[invperm(F.prow), :]) - q_csc = (F.Q * sparse(I, size(a_csc, 1), minimum(size(a_csc))))[invperm(F.prow), :] - r_csc = F.R[:, invperm(F.pcol)] - nblocks = size(q_csc, 2) - @assert nblocks == size(r_csc, 1) - a_sparse = blocks(a) - blocklengths_qr = Vector{Int}(undef, nblocks) - for I in nonzero_keys(a_sparse) - i, k = Tuple(I) - # Get the nonzero columns associated - # with the given row. - j = only(findnonzerorows(r_csc, k)) - # @assert is_structural_nonzero(r, j, k) - # @assert is_structural_nonzero(q, i, j) - blocklengths_qr[j] = qr_rank(alg, @view(a[BlockArrays.Block(i, k)])) - end - axes_qr = blockedrange(blocklengths_qr) - axes_q = (axes(a, 1), axes_qr) - axes_r = (axes_qr, axes(a, 2)) - # TODO: Come up with a better format to ouput. - # TODO: Get `axes_qr` as a permutation of the - # axes of `axes(a, 2)` to preserve sectors - # when using symmetric tensors. - return q_csc, axes_q, r_csc, axes_r -end - -# m × n → (m × m) ⋅ (m × n) -function qr_block_sparse_structure(alg::Algorithm"full", a::BlockSparseArray{<:Any,2}) - return error("Not implemented") -end - -function qr_blocks(a, structure_r, block_a) - i, k = block_a.n - j = only(findnonzerorows(structure_r, k)) - return BlockArrays.Block(i, j), BlockArrays.Block(j, k) -end - -# Block-preserving QR. -function LinearAlgebra.qr(a::BlockSparseArray{<:Any,2}; alg="thin") - return qr(Algorithm(alg), a) -end - -# Block-preserving QR. -function LinearAlgebra.qr(alg::Algorithm, a::BlockSparseArray{<:Any,2}) - if !is_block_permutation_matrix(a) - # Must have 1 or fewer blocks per row/column. - println("Block sparsity structure is:") - display(nonzero_blockkeys(a)) - error("Not a block permutation matrix") - end - eltype_a = eltype(a) - # TODO: `structure_q` isn't needed. - structure_q, axes_q, structure_r, axes_r = qr_block_sparse_structure(alg, a) - # TODO: Make this generic to GPU, use `similar`. 
- q = BlockSparseArray{eltype_a}(axes_q) - r = BlockSparseArray{eltype_a}(axes_r) - for block_a in nonzero_blockkeys(a) - # TODO: Make thin or full depending on `alg`. - q_b, r_b = qr(a[block_a]) - # Determine the block of Q and R - # TODO: Do the block locations change for `alg="full"`? - block_q, block_r = qr_blocks(a, structure_r, block_a) - - # TODO Make this generic to GPU. - q[block_q] = Matrix(q_b) - r[block_r] = r_b - end - # TODO: If `alg="full"`, fill in blocks of `q` - # with random unitaries. - # Which blocks should be filled? Seems to be based - # on the QNs... - # Maybe fill diagonal blocks. - # TODO: Also store `structure_r` in some way - # since that is needed for permuting the QNs. - return q, r -end diff --git a/src/blocksparsearray/blocksparsearray.jl b/src/blocksparsearray/blocksparsearray.jl index 3b3bbae..93b2bad 100644 --- a/src/blocksparsearray/blocksparsearray.jl +++ b/src/blocksparsearray/blocksparsearray.jl @@ -1,4 +1,5 @@ using BlockArrays: BlockArrays, Block, BlockedUnitRange, blockedrange, blocklength +using Derive: @interface using Dictionaries: Dictionary using SparseArraysBase: SparseArrayDOK @@ -169,7 +170,8 @@ Base.axes(a::BlockSparseArray) = a.axes # BlockArrays `AbstractBlockArray` interface. # This is used by `blocks(::AnyAbstractBlockSparseArray)`. -blocksparse_blocks(a::BlockSparseArray) = a.blocks +@interface ::AbstractBlockSparseArrayInterface BlockArrays.blocks(a::BlockSparseArray) = + a.blocks # TODO: Use `TypeParameterAccessors`. function blockstype( diff --git a/src/blocksparsearrayinterface/arraylayouts.jl b/src/blocksparsearrayinterface/arraylayouts.jl index 3ec48d5..a4a3cab 100644 --- a/src/blocksparsearrayinterface/arraylayouts.jl +++ b/src/blocksparsearrayinterface/arraylayouts.jl @@ -1,21 +1,16 @@ using ArrayLayouts: ArrayLayouts, Dot, MatMulMatAdd, MatMulVecAdd, MulAdd -using BlockArrays: BlockLayout +using BlockArrays: BlockArrays, BlockLayout, muladd! +using Derive: @interface using SparseArraysBase: SparseLayout -using LinearAlgebra: dot, mul! +using LinearAlgebra: LinearAlgebra, dot, mul! -function blocksparse_muladd!( +@interface ::AbstractBlockSparseArrayInterface function BlockArrays.muladd!( α::Number, a1::AbstractArray, a2::AbstractArray, β::Number, a_dest::AbstractArray ) mul!(blocks(a_dest), blocks(a1), blocks(a2), α, β) return a_dest end -function blocksparse_matmul!(m::MulAdd) - α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C - blocksparse_muladd!(α, a1, a2, β, a_dest) - return a_dest -end - function ArrayLayouts.materialize!( m::MatMulMatAdd{ <:BlockLayout{<:SparseLayout}, @@ -23,7 +18,8 @@ function ArrayLayouts.materialize!( <:BlockLayout{<:SparseLayout}, }, ) - blocksparse_matmul!(m) + α, a1, a2, β, a_dest = m.α, m.A, m.B, m.β, m.C + @interface BlockSparseArrayInterface() muladd!(m.α, m.A, m.B, m.β, m.C) return m.C end function ArrayLayouts.materialize!( @@ -33,16 +29,18 @@ function ArrayLayouts.materialize!( <:BlockLayout{<:SparseLayout}, }, ) - blocksparse_matmul!(m) + @interface BlockSparseArrayInterface() matmul!(m) return m.C end -function blocksparse_dot(a1::AbstractArray, a2::AbstractArray) +@interface ::AbstractBlockSparseArrayInterface function LinearAlgebra.dot( + a1::AbstractArray, a2::AbstractArray +) # TODO: Add a check that the blocking of `a1` and `a2` are # the same, or the same up to a reshape. 
return dot(blocks(a1), blocks(a2)) end function Base.copy(d::Dot{<:BlockLayout{<:SparseLayout},<:BlockLayout{<:SparseLayout}}) - return blocksparse_dot(d.A, d.B) + return @interface BlockSparseArrayInterface() dot(d.A, d.B) end diff --git a/src/blocksparsearrayinterface/blocksparsearrayinterface.jl b/src/blocksparsearrayinterface/blocksparsearrayinterface.jl index af2a9cd..a3e391e 100644 --- a/src/blocksparsearrayinterface/blocksparsearrayinterface.jl +++ b/src/blocksparsearrayinterface/blocksparsearrayinterface.jl @@ -1,4 +1,6 @@ +using ArrayLayouts: ArrayLayouts, zero! using BlockArrays: + BlockArrays, AbstractBlockVector, Block, BlockIndex, @@ -12,20 +14,48 @@ using BlockArrays: blocklengths, blocks, findblockindex +using Derive: Derive, @interface using LinearAlgebra: Adjoint, Transpose -using SparseArraysBase: perm, iperm, stored_length, sparse_zero! +using SparseArraysBase: + AbstractSparseArrayInterface, eachstoredindex, perm, iperm, storedlength, storedvalues -blocksparse_blocks(a::AbstractArray) = error("Not implemented") +# Like `SparseArraysBase.eachstoredindex` but +# at the block level, i.e. iterates over the +# stored block locations. +function eachblockstoredindex(a::AbstractArray) + # TODO: Use `Iterators.map`. + return Block.(Tuple.(eachstoredindex(blocks(a)))) +end + +# Like `BlockArrays.eachblock` but only iterating +# over stored blocks. +function eachstoredblock(a::AbstractArray) + return storedvalues(blocks(a)) +end + +abstract type AbstractBlockSparseArrayInterface <: AbstractSparseArrayInterface end + +# TODO: Also support specifying the `blocktype` along with the `eltype`. +Derive.arraytype(::AbstractBlockSparseArrayInterface, T::Type) = BlockSparseArray{T} + +struct BlockSparseArrayInterface <: AbstractBlockSparseArrayInterface end + +@interface ::AbstractBlockSparseArrayInterface BlockArrays.blocks(a::AbstractArray) = + error("Not implemented") blockstype(a::AbstractArray) = blockstype(typeof(a)) -function blocksparse_getindex(a::AbstractArray{<:Any,N}, I::Vararg{Int,N}) where {N} +@interface ::AbstractBlockSparseArrayInterface function Base.getindex( + a::AbstractArray{<:Any,N}, I::Vararg{Int,N} +) where {N} @boundscheck checkbounds(a, I...) return a[findblockindex.(axes(a), I)...] end # Fix ambiguity error. -function blocksparse_getindex(a::AbstractArray{<:Any,0}) +@interface ::AbstractBlockSparseArrayInterface function Base.getindex( + a::AbstractArray{<:Any,0} +) # TODO: Use `Block()[]` once https://github.com/JuliaArrays/BlockArrays.jl/issues/430 # is fixed. return a[BlockIndex{0,Tuple{},Tuple{}}((), ())] @@ -37,14 +67,16 @@ end # and make that explicit with `@blocked a[1:2, 1:2]`. See the discussion in # https://github.com/JuliaArrays/BlockArrays.jl/issues/347 and also # https://github.com/ITensor/ITensors.jl/issues/1336. -function blocksparse_to_indices(a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}}) +@interface ::AbstractBlockSparseArrayInterface function Base.to_indices( + a, inds, I::Tuple{UnitRange{<:Integer},Vararg{Any}} +) bs1 = to_blockindices(inds[1], I[1]) I1 = BlockSlice(bs1, blockedunitrange_getindices(inds[1], I[1])) return (I1, to_indices(a, Base.tail(inds), Base.tail(I))...) end # Special case when there is no blocking. 
-function blocksparse_to_indices( +@interface ::AbstractBlockSparseArrayInterface function Base.to_indices( a, inds::Tuple{Base.OneTo{<:Integer},Vararg{Any}}, I::Tuple{UnitRange{<:Integer},Vararg{Any}}, @@ -53,14 +85,16 @@ function blocksparse_to_indices( end # a[[Block(2), Block(1)], [Block(2), Block(1)]] -function blocksparse_to_indices(a, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}}) +@interface ::AbstractBlockSparseArrayInterface function Base.to_indices( + a, inds, I::Tuple{Vector{<:Block{1}},Vararg{Any}} +) I1 = BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1])) return (I1, to_indices(a, Base.tail(inds), Base.tail(I))...) end # a[mortar([Block(1)[1:2], Block(2)[1:3]]), mortar([Block(1)[1:2], Block(2)[1:3]])] # a[[Block(1)[1:2], Block(2)[1:3]], [Block(1)[1:2], Block(2)[1:3]]] -function blocksparse_to_indices( +@interface ::AbstractBlockSparseArrayInterface function Base.to_indices( a, inds, I::Tuple{BlockVector{<:BlockIndex{1},<:Vector{<:BlockIndexRange{1}}},Vararg{Any}} ) I1 = BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1])) @@ -70,7 +104,7 @@ end # a[BlockVector([Block(2), Block(1)], [2]), BlockVector([Block(2), Block(1)], [2])] # Permute and merge blocks. # TODO: This isn't merging blocks yet, that needs to be implemented that. -function blocksparse_to_indices( +@interface ::AbstractBlockSparseArrayInterface function Base.to_indices( a, inds, I::Tuple{AbstractBlockVector{<:Block{1}},Vararg{Any}} ) I1 = BlockIndices(I[1], blockedunitrange_getindices(inds[1], I[1])) @@ -80,21 +114,27 @@ end # TODO: Need to implement this! function block_merge end -function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::Vararg{Int,N}) where {N} +@interface ::AbstractBlockSparseArrayInterface function Base.setindex!( + a::AbstractArray{<:Any,N}, value, I::Vararg{Int,N} +) where {N} @boundscheck checkbounds(a, I...) a[findblockindex.(axes(a), I)...] = value return a end # Fix ambiguity error. -function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value) +@interface ::AbstractBlockSparseArrayInterface function Base.setindex!( + a::AbstractArray{<:Any,0}, value +) # TODO: Use `Block()[]` once https://github.com/JuliaArrays/BlockArrays.jl/issues/430 # is fixed. a[BlockIndex{0,Tuple{},Tuple{}}((), ())] = value return a end -function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N}) where {N} +@interface ::AbstractBlockSparseArrayInterface function Base.setindex!( + a::AbstractArray{<:Any,N}, value, I::BlockIndex{N} +) where {N} i = Int.(Tuple(block(I))) a_b = blocks(a)[i...] a_b[I.α...] = value @@ -104,7 +144,9 @@ function blocksparse_setindex!(a::AbstractArray{<:Any,N}, value, I::BlockIndex{N end # Fix ambiguity error. -function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value, I::BlockIndex{0}) +@interface ::AbstractBlockSparseArrayInterface function Base.setindex!( + a::AbstractArray{<:Any,0}, value, I::BlockIndex{0} +) a_b = blocks(a)[] a_b[] = value # Set the block, required if it is structurally zero. @@ -112,32 +154,29 @@ function blocksparse_setindex!(a::AbstractArray{<:Any,0}, value, I::BlockIndex{0 return a end -function blocksparse_fill!(a::AbstractArray, value) +@interface ::AbstractBlockSparseArrayInterface function Base.fill!(a::AbstractArray, value) + # TODO: Only do this check if `value isa Number`? + if iszero(value) + zero!(a) + return a + end + # TODO: Maybe use `map` over `blocks(a)` or something + # like that. 
for b in BlockRange(a) - # We can't use: - # ```julia - # a[b] .= value - # ``` - # since that would lead to a stack overflow, - # because broadcasting calls `fill!`. - - # TODO: Ideally we would use: - # ```julia - # @view!(a[b]) .= value - # ``` - # but that doesn't work on `SubArray` right now. - - # This line is needed to instantiate blocks - # that aren't instantiated yet. Maybe - # we can make this work without this line? - blocks(a)[Int.(Tuple(b))...] = blocks(a)[Int.(Tuple(b))...] - blocks(a)[Int.(Tuple(b))...] .= value + a[b] .= value end return a end -function block_stored_length(a::AbstractArray) - return stored_length(blocks(a)) +@interface ::AbstractBlockSparseArrayInterface function ArrayLayouts.zero!(a::AbstractArray) + # This will try to empty the storage if possible. + zero!(blocks(a)) + return a +end + +# TODO: Rename to `blockstoredlength`. +function blockstoredlength(a::AbstractArray) + return storedlength(blocks(a)) end # BlockArrays @@ -156,7 +195,9 @@ struct SparsePermutedDimsArrayBlocks{ } <: AbstractSparseArray{BlockType,N} array::Array end -function blocksparse_blocks(a::PermutedDimsArray) +@interface ::AbstractBlockSparseArrayInterface function BlockArrays.blocks( + a::PermutedDimsArray +) return SparsePermutedDimsArrayBlocks{eltype(a),ndims(a),blocktype(parent(a)),typeof(a)}(a) end function Base.size(a::SparsePermutedDimsArrayBlocks) @@ -169,24 +210,27 @@ function Base.getindex( blocks(parent(a.array))[_getindices(index, _invperm(a.array))...], _perm(a.array) ) end -function SparseArraysBase.stored_indices(a::SparsePermutedDimsArrayBlocks) - return map(I -> _getindices(I, _perm(a.array)), stored_indices(blocks(parent(a.array)))) +function SparseArraysBase.eachstoredindex(a::SparsePermutedDimsArrayBlocks) - wait +function SparseArraysBase.eachstoredindex(a::SparsePermutedDimsArrayBlocks) + return map(I -> _getindices(I, _perm(a.array)), eachstoredindex(blocks(parent(a.array)))) end # TODO: Either make this the generic interface or define # `SparseArraysBase.sparse_storage`, which is used # to define this. -function SparseArraysBase.stored_length(a::SparsePermutedDimsArrayBlocks) - return length(stored_indices(a)) -end -function SparseArraysBase.sparse_storage(a::SparsePermutedDimsArrayBlocks) - return error("Not implemented") +function SparseArraysBase.storedlength(a::SparsePermutedDimsArrayBlocks) + return length(eachstoredindex(a)) end +## TODO: Delete. +## function SparseArraysBase.sparse_storage(a::SparsePermutedDimsArrayBlocks) +## return error("Not implemented") +## end reverse_index(index) = reverse(index) reverse_index(index::CartesianIndex) = CartesianIndex(reverse(Tuple(index))) -blocksparse_blocks(a::Transpose) = transpose(blocks(parent(a))) -blocksparse_blocks(a::Adjoint) = adjoint(blocks(parent(a))) +@interface ::AbstractBlockSparseArrayInterface BlockArrays.blocks(a::Transpose) = + transpose(blocks(parent(a))) +@interface ::AbstractBlockSparseArrayInterface BlockArrays.blocks(a::Adjoint) = + adjoint(blocks(parent(a))) # Represents the array of arrays of a `SubArray` # wrapping a block sparse array, i.e. `blocks(a)` where `a` is a `SubArray`. @@ -194,7 +238,7 @@ struct SparseSubArrayBlocks{T,N,BlockType<:AbstractArray{T,N},Array<:SubArray{T, AbstractSparseArray{BlockType,N} array::Array end -function blocksparse_blocks(a::SubArray) +@interface ::AbstractBlockSparseArrayInterface function BlockArrays.blocks(a::SubArray) return SparseSubArrayBlocks{eltype(a),ndims(a),blocktype(parent(a)),typeof(a)}(a) end # TODO: Define this as `blockrange(a::AbstractArray, indices::Tuple{Vararg{AbstractUnitRange}})`.
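Aside (not part of the diff): a minimal sketch of the renamed block-level storage API (`blockstoredlength`, `eachblockstoredindex`, `eachstoredblock`) and the new `zero!` overload, mirroring the assertions in the updated tests; element type and block sizes are illustrative.
```julia
using ArrayLayouts: zero!
using BlockArrays: Block
using BlockSparseArrays: BlockSparseArray, blockstoredlength, eachblockstoredindex, eachstoredblock

a = BlockSparseArray{Float64}([2, 3], [2, 3])
a[Block(2, 1)] = ones(3, 2)
a[Block(1, 2)] = ones(2, 3)

blockstoredlength(a)  # 2 stored blocks
issetequal(eachblockstoredindex(a), [Block(2, 1), Block(1, 2)])  # expected true
issetequal(eachstoredblock(a), [a[Block(2, 1)], a[Block(1, 2)]])  # expected true

zero!(a)              # empties the block storage where possible
blockstoredlength(a)  # expected 0
```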
@@ -240,35 +284,44 @@ function Base.isassigned(a::SparseSubArrayBlocks{<:Any,N}, I::Vararg{Int,N}) whe # TODO: Implement this properly. return true end -function SparseArraysBase.stored_indices(a::SparseSubArrayBlocks) - return stored_indices(view(blocks(parent(a.array)), blockrange(a)...)) +function SparseArraysBase.eachstoredindex(a::SparseSubArrayBlocks) + return eachstoredindex(view(blocks(parent(a.array)), blockrange(a)...)) end # TODO: Either make this the generic interface or define # `SparseArraysBase.sparse_storage`, which is used # to defined this. -SparseArraysBase.stored_length(a::SparseSubArrayBlocks) = length(stored_indices(a)) +SparseArraysBase.storedlength(a::SparseSubArrayBlocks) = length(eachstoredindex(a)) ## struct SparseSubArrayBlocksStorage{Array<:SparseSubArrayBlocks} ## array::Array ## end -function SparseArraysBase.sparse_storage(a::SparseSubArrayBlocks) - return map(I -> a[I], stored_indices(a)) -end -function SparseArraysBase.getindex_zero_function(a::SparseSubArrayBlocks) - # TODO: Base it off of `getindex_zero_function(blocks(parent(a.array))`, but replace the - # axes with `axes(a.array)`. - return BlockZero(axes(a.array)) +## TODO: Delete. +## function SparseArraysBase.sparse_storage(a::SparseSubArrayBlocks) +## return map(I -> a[I], eachstoredindex(a)) +## end + +## TODO: Delete. +## function SparseArraysBase.getindex_zero_function(a::SparseSubArrayBlocks) +## # TODO: Base it off of `getindex_zero_function(blocks(parent(a.array))`, but replace the +## # axes with `axes(a.array)`. +## return BlockZero(axes(a.array)) +## end + +function SparseArraysBase.getunstoredindex( + a::SparseSubArrayBlocks{<:Any,N}, I::Vararg{Int,N} +) where {N} + return error("Not implemented.") end to_blocks_indices(I::BlockSlice{<:BlockRange{1}}) = Int.(I.block) to_blocks_indices(I::BlockIndices{<:Vector{<:Block{1}}}) = Int.(I.blocks) -function blocksparse_blocks( +@interface ::AbstractBlockSparseArrayInterface function BlockArrays.blocks( a::SubArray{<:Any,<:Any,<:Any,<:Tuple{Vararg{BlockSliceCollection}}} ) return @view blocks(parent(a))[map(to_blocks_indices, parentindices(a))...] end using BlockArrays: BlocksView -SparseArraysBase.stored_length(a::BlocksView) = length(a) +SparseArraysBase.storedlength(a::BlocksView) = length(a) diff --git a/src/blocksparsearrayinterface/blockzero.jl b/src/blocksparsearrayinterface/blockzero.jl index 25665ac..286823f 100644 --- a/src/blocksparsearrayinterface/blockzero.jl +++ b/src/blocksparsearrayinterface/blockzero.jl @@ -18,28 +18,32 @@ struct BlockZero{Axes} axes::Axes end -function (f::BlockZero)(a::AbstractArray, I) - return f(eltype(a), I) +function (f::BlockZero)(a::AbstractArray, I...) + return f(eltype(a), I...) end -function (f::BlockZero)(arraytype::Type{<:SubArray{<:Any,<:Any,P}}, I) where {P} - return f(P, I) +function (f::BlockZero)(arraytype::Type{<:SubArray{<:Any,<:Any,P}}, I...) where {P} + return f(P, I...) end -function (f::BlockZero)(arraytype::Type{<:AbstractArray}, I) +function (f::BlockZero)(arraytype::Type{<:AbstractArray}, I::CartesianIndex) + return f(arraytype, Tuple(I)...) +end + +function (f::BlockZero)(arraytype::Type{<:AbstractArray}, I::Int...) # TODO: Make sure this works for sparse or block sparse blocks, immutable # blocks, diagonal blocks, etc.! - blck_size = block_size(f.axes, Block(Tuple(I))) + blck_size = block_size(f.axes, Block(I)) blck_type = similartype(arraytype, blck_size) return fill!(blck_type(undef, blck_size), false) end # Fallback so that `SparseArray` with scalar elements works. 
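Aside (not part of the diff): a short sketch of what the block-zero machinery above means for users; unstored blocks read back as zero blocks of the correct size, as the updated tests also check. The element type and block sizes here are illustrative.
```julia
using BlockArrays: Block
using BlockSparseArrays: BlockSparseArray

a = BlockSparseArray{Float64}([2, 3], [3, 4])
a[Block(1, 2)] = ones(2, 4)

# Unstored blocks are materialized on demand as zero blocks
# of the correct size:
a[Block(2, 1)]          # expected to equal zeros(3, 3)
iszero(a[Block(2, 2)])  # expected true
```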
-function (f::BlockZero)(blocktype::Type{<:Number}, I) +function (f::BlockZero)(blocktype::Type{<:Number}, I...) return zero(blocktype) end # Fallback to Array if it is abstract -function (f::BlockZero)(arraytype::Type{AbstractArray{T,N}}, I) where {T,N} - return f(Array{T,N}, I) +function (f::BlockZero)(arraytype::Type{AbstractArray{T,N}}, I...) where {T,N} + return f(Array{T,N}, I...) end diff --git a/src/blocksparsearrayinterface/broadcast.jl b/src/blocksparsearrayinterface/broadcast.jl index 7028d29..18f7ba4 100644 --- a/src/blocksparsearrayinterface/broadcast.jl +++ b/src/blocksparsearrayinterface/broadcast.jl @@ -1,7 +1,12 @@ using Base.Broadcast: BroadcastStyle, AbstractArrayStyle, DefaultArrayStyle, Broadcasted using BroadcastMapConversion: map_function, map_args +using Derive: Derive, @interface -struct BlockSparseArrayStyle{N} <: AbstractArrayStyle{N} end +abstract type AbstractBlockSparseArrayStyle{N} <: AbstractArrayStyle{N} end + +Derive.interface(::Type{<:AbstractBlockSparseArrayStyle}) = BlockSparseArrayInterface() + +struct BlockSparseArrayStyle{N} <: AbstractBlockSparseArrayStyle{N} end # Define for new sparse array types. # function Broadcast.BroadcastStyle(arraytype::Type{<:MyBlockSparseArray}) @@ -29,11 +34,12 @@ function Base.similar(bc::Broadcasted{<:BlockSparseArrayStyle}, elt::Type) end # Broadcasting implementation +# TODO: Delete this in favor of `Derive` version. function Base.copyto!( dest::AbstractArray{<:Any,N}, bc::Broadcasted{BlockSparseArrayStyle{N}} ) where {N} # convert to map # flatten and only keep the AbstractArray arguments - sparse_map!(map_function(bc), dest, map_args(bc)...) + @interface interface(bc) map!(map_function(bc), dest, map_args(bc)...) return dest end diff --git a/src/blocksparsearrayinterface/cat.jl b/src/blocksparsearrayinterface/cat.jl index 0115033..a4e9399 100644 --- a/src/blocksparsearrayinterface/cat.jl +++ b/src/blocksparsearrayinterface/cat.jl @@ -1,26 +1,16 @@ using BlockArrays: AbstractBlockedUnitRange, blockedrange, blocklengths -using SparseArraysBase: SparseArraysBase, allocate_cat_output, sparse_cat! +using Derive: Derive, @interface, cat! +using SparseArraysBase: SparseArraysBase -# TODO: Maybe move to `SparseArraysBaseBlockArraysExt`. +# TODO: Maybe move to `DeriveBlockArraysExt`. # TODO: Handle dual graded unit ranges, for example in a new `SparseArraysBaseGradedUnitRangesExt`. -function SparseArraysBase.axis_cat( - a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange -) +function Derive.axis_cat(a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange) return blockedrange(vcat(blocklengths(a1), blocklengths(a2))) end -# that erroneously allocates too many blocks that are -# zero and shouldn't be stored. -function blocksparse_cat!(a_dest::AbstractArray, as::AbstractArray...; dims) - sparse_cat!(blocks(a_dest), blocks.(as)...; dims) - return a_dest -end - -# TODO: Delete this in favor of `sparse_cat`, currently -# that erroneously allocates too many blocks that are -# zero and shouldn't be stored. 
-function blocksparse_cat(as::AbstractArray...; dims) - a_dest = allocate_cat_output(as...; dims) - blocksparse_cat!(a_dest, as...; dims) +@interface ::AbstractBlockSparseArrayInterface function Derive.cat!( + a_dest::AbstractArray, as::AbstractArray...; dims +) + cat!(blocks(a_dest), blocks.(as)...; dims) return a_dest end diff --git a/src/blocksparsearrayinterface/linearalgebra.jl b/src/blocksparsearrayinterface/linearalgebra.jl index ac7f566..702e761 100644 --- a/src/blocksparsearrayinterface/linearalgebra.jl +++ b/src/blocksparsearrayinterface/linearalgebra.jl @@ -1,6 +1,6 @@ -using LinearAlgebra: mul! +using LinearAlgebra: LinearAlgebra, mul! -function blocksparse_mul!( +@interface ::AbstractBlockSparseArrayInterface function LinearAlgebra.mul!( a_dest::AbstractMatrix, a1::AbstractMatrix, a2::AbstractMatrix, diff --git a/src/blocksparsearrayinterface/map.jl b/src/blocksparsearrayinterface/map.jl index 2d537cd..ebbfba4 100644 --- a/src/blocksparsearrayinterface/map.jl +++ b/src/blocksparsearrayinterface/map.jl @@ -7,7 +7,7 @@ function map_stored_blocks(f, a::AbstractArray) # TODO: `block_stored_indices` should output `Indices` storing # the stored Blocks, not a `Dictionary` from cartesian indices # to Blocks. - bs = collect(block_stored_indices(a)) + bs = collect(eachblockstoredindex(a)) ds = map(b -> f(@view(a[b])), bs) # We manually specify the block type using `Base.promote_op` # since `a[b]` may not be inferrable. For example, if `blocktype(a)` diff --git a/src/blocksparsearrayinterface/views.jl b/src/blocksparsearrayinterface/views.jl index 8e43f26..56d17b0 100644 --- a/src/blocksparsearrayinterface/views.jl +++ b/src/blocksparsearrayinterface/views.jl @@ -1,3 +1,3 @@ -function blocksparse_view(a, I...) +@interface ::AbstractBlockSparseArrayInterface function Base.view(a, I...) return Base.invoke(view, Tuple{AbstractArray,Vararg{Any}}, a, I...) end diff --git a/test/Project.toml b/test/Project.toml index dec0051..1bc98ec 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -1,14 +1,16 @@ [deps] +Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" +ArrayLayouts = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" BlockSparseArrays = "2c9a651f-6452-4ace-a6ac-809f4280fbb4" BroadcastMapConversion = "4a4adec5-520f-4750-bb37-d5e66b4ddeb2" +Derive = "a07dfc7f-7d04-4eb5-84cc-a97f051f655a" GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527" GradedUnitRanges = "e2de450a-8a67-46c7-b59c-01d5a3d041c5" JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb" LabelledNumbers = "f856a3a6-4152-4ec4-b2a7-02c1a55d7993" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" NestedPermutedDimsArrays = "2c2a8ec4-3cfc-4276-aa3e-1307b4294e58" Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" diff --git a/test/TestBlockSparseArraysUtils.jl b/test/basics/TestBlockSparseArraysUtils.jl similarity index 100% rename from test/TestBlockSparseArraysUtils.jl rename to test/basics/TestBlockSparseArraysUtils.jl diff --git a/test/test_basics.jl b/test/basics/test_basics.jl similarity index 86% rename from test/test_basics.jl rename to test/basics/test_basics.jl index 0f2692c..def25c8 100644 --- a/test/test_basics.jl +++ b/test/basics/test_basics.jl @@ -1,4 +1,5 @@ -@eval module $(gensym()) +using Adapt: adapt +using ArrayLayouts: zero! 
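Aside (not part of the diff): the updated tests below drop the NDTensors device helpers and instead parameterize over array types, moving inputs with `Adapt.adapt`. A minimal sketch of that pattern, with `ones(2, 2)` as a stand-in for the test data:
```julia
using Adapt: adapt
using JLArrays: JLArray

# Each test set picks an array type and moves inputs to it with `adapt`:
for arrayt in (Array, JLArray)
    dev(a) = adapt(arrayt, a)
    x = dev(ones(2, 2))
    @assert x isa arrayt
end
```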
using BlockArrays: Block, BlockIndexRange, @@ -20,29 +21,26 @@ using BlockSparseArrays: BlockSparseMatrix, BlockSparseVector, BlockView, - block_stored_length, - block_reshape, - block_stored_indices, + blockstoredlength, + blockreshape, + eachblockstoredindex, + eachstoredblock, blockstype, blocktype, view! using GPUArraysCore: @allowscalar +using JLArrays: JLArray using LinearAlgebra: Adjoint, Transpose, dot, mul!, norm -using NDTensors.GPUArraysCoreExtensions: cpu -using SparseArraysBase: SparseArrayDOK, SparseMatrixDOK, SparseVectorDOK, stored_length +using SparseArraysBase: SparseArrayDOK, SparseMatrixDOK, SparseVectorDOK, storedlength using TensorAlgebra: contract using Test: @test, @test_broken, @test_throws, @testset, @inferred include("TestBlockSparseArraysUtils.jl") -using NDTensors: NDTensors -include(joinpath(pkgdir(NDTensors), "test", "NDTensorsTestUtils", "NDTensorsTestUtils.jl")) -using .NDTensorsTestUtils: devices_list, is_supported_eltype -@testset "BlockSparseArrays (dev=$dev, eltype=$elt)" for dev in devices_list(copy(ARGS)), +arrayts = (Array, JLArray) +@testset "BlockSparseArrays (arraytype=$arrayt, eltype=$elt)" for arrayt in arrayts, elt in (Float32, Float64, Complex{Float32}, Complex{Float64}) - if !is_supported_eltype(dev, elt) - continue - end + dev(a) = adapt(arrayt, a) @testset "Broken" begin # TODO: Fix this and turn it into a proper test. a = dev(BlockSparseArray{elt}([2, 3], [2, 3])) @@ -97,8 +95,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blockstype(a) <: SparseMatrixDOK{Matrix{elt}} @test blocklengths.(axes(a)) == ([2, 3], [3, 4]) @test iszero(a) - @test iszero(block_stored_length(a)) - @test iszero(stored_length(a)) + @test iszero(blockstoredlength(a)) + @test iszero(storedlength(a)) end end @@ -129,8 +127,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blockstype(a) <: SparseVectorDOK{Vector{elt}} @test blocklengths.(axes(a)) == ([2, 3],) @test iszero(a) - @test iszero(block_stored_length(a)) - @test iszero(stored_length(a)) + @test iszero(blockstoredlength(a)) + @test iszero(storedlength(a)) end end end @@ -145,7 +143,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blocklength.(axes(a)) == (2, 2) @test blocksize(a) == (2, 2) @test size(a) == (5, 5) - @test block_stored_length(a) == 0 + @test blockstoredlength(a) == 0 @test iszero(a) @allowscalar @test all(I -> iszero(a[I]), eachindex(a)) @test_throws DimensionMismatch a[Block(1, 1)] = randn(elt, 2, 3) @@ -158,7 +156,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test blocklength.(axes(a)) == (2, 2) @test blocksize(a) == (2, 2) @test size(a) == (5, 5) - @test block_stored_length(a) == 1 + @test blockstoredlength(a) == 1 @test !iszero(a) @test a[3, 3] == 33 @test all(eachindex(a)) do I @@ -169,6 +167,12 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype end end + a = BlockSparseArray{elt}([2, 3], [2, 3]) + a[Block(2, 1)] = randn(elt, 3, 2) + a[Block(1, 2)] = randn(elt, 2, 3) + @test issetequal(eachstoredblock(a), [a[Block(2, 1)], a[Block(1, 2)]]) + @test issetequal(eachblockstoredindex(a), [Block(2, 1), Block(1, 2)]) + a[3, 3] = NaN @test isnan(norm(a)) @@ -178,7 +182,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test isone(length(a)) @test blocksize(a) == () @test blocksizes(a) == fill(()) - @test iszero(block_stored_length(a)) + @test iszero(blockstoredlength(a)) @test iszero(@allowscalar(a[])) @test iszero(@allowscalar(a[CartesianIndex()])) @test a[Block()] == dev(fill(0)) @@ 
-193,7 +197,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test isone(length(b)) @test blocksize(b) == () @test blocksizes(b) == fill(()) - @test isone(block_stored_length(b)) + @test isone(blockstoredlength(b)) @test @allowscalar(b[]) == 2 @test @allowscalar(b[CartesianIndex()]) == 2 @test b[Block()] == dev(fill(2)) @@ -212,11 +216,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test at isa Transpose @test size(at) == reverse(size(a)) @test blocksize(at) == reverse(blocksize(a)) - @test stored_length(at) == stored_length(a) - @test block_stored_length(at) == block_stored_length(a) - for bind in block_stored_indices(a) + @test storedlength(at) == storedlength(a) + @test blockstoredlength(at) == blockstoredlength(a) + for bind in eachblockstoredindex(a) bindt = Block(reverse(Int.(Tuple(bind)))) - @test bindt in block_stored_indices(at) + @test bindt in eachblockstoredindex(at) end @test @views(at[Block(1, 1)]) == transpose(a[Block(1, 1)]) @@ -236,11 +240,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test at isa Adjoint @test size(at) == reverse(size(a)) @test blocksize(at) == reverse(blocksize(a)) - @test stored_length(at) == stored_length(a) - @test block_stored_length(at) == block_stored_length(a) - for bind in block_stored_indices(a) + @test storedlength(at) == storedlength(a) + @test blockstoredlength(at) == blockstoredlength(a) + for bind in eachblockstoredindex(a) bindt = Block(reverse(Int.(Tuple(bind)))) - @test bindt in block_stored_indices(at) + @test bindt in eachblockstoredindex(at) end @test @views(at[Block(1, 1)]) == adjoint(a[Block(1, 1)]) @@ -257,11 +261,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = dev(randn(elt, size(a[b]))) end @test eltype(a) == elt - @test block_stored_length(a) == 2 - @test stored_length(a) == 2 * 4 + 3 * 3 + @test blockstoredlength(a) == 2 + @test storedlength(a) == 2 * 4 + 3 * 3 # TODO: Broken on GPU. - if dev ≠ cpu + if arrayt ≠ Array a = dev(BlockSparseArray{elt}([2, 3], [3, 4])) @test_broken a[Block(1, 2)] .= 2 end @@ -274,11 +278,11 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test iszero(a[Block(1, 1)]) @test iszero(a[Block(2, 1)]) @test iszero(a[Block(2, 2)]) - @test block_stored_length(a) == 1 - @test stored_length(a) == 2 * 4 + @test blockstoredlength(a) == 1 + @test storedlength(a) == 2 * 4 # TODO: Broken on GPU. 
- if dev ≠ cpu + if arrayt ≠ Array a = dev(BlockSparseArray{elt}([2, 3], [3, 4])) @test_broken a[Block(1, 2)] .= 0 end @@ -291,8 +295,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test iszero(a[Block(2, 1)]) @test iszero(a[Block(1, 2)]) @test iszero(a[Block(2, 2)]) - @test block_stored_length(a) == 1 - @test stored_length(a) == 2 * 4 + @test blockstoredlength(a) == 1 + @test storedlength(a) == 2 * 4 a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) @views for b in [Block(1, 2), Block(2, 1)] @@ -301,8 +305,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = similar(a, complex(elt)) @test eltype(b) == complex(eltype(a)) @test iszero(b) - @test block_stored_length(b) == 0 - @test stored_length(b) == 0 + @test blockstoredlength(b) == 0 + @test storedlength(b) == 0 @test size(b) == size(a) @test blocksize(b) == blocksize(a) @@ -310,23 +314,23 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = @view a[[Block(2), Block(1)], [Block(2), Block(1)]] c = @view b[Block(1, 1)] @test iszero(a) - @test iszero(stored_length(a)) + @test iszero(storedlength(a)) @test iszero(b) - @test iszero(stored_length(b)) + @test iszero(storedlength(b)) # TODO: Broken on GPU. - @test iszero(c) broken = dev ≠ cpu - @test iszero(stored_length(c)) + @test iszero(c) broken = arrayt ≠ Array + @test iszero(storedlength(c)) @allowscalar a[5, 7] = 1 @test !iszero(a) - @test stored_length(a) == 3 * 4 + @test storedlength(a) == 3 * 4 @test !iszero(b) - @test stored_length(b) == 3 * 4 + @test storedlength(b) == 3 * 4 # TODO: Broken on GPU. - @test !iszero(c) broken = dev ≠ cpu - @test stored_length(c) == 3 * 4 + @test !iszero(c) broken = arrayt ≠ Array + @test storedlength(c) == 3 * 4 d = @view a[1:4, 1:6] @test iszero(d) - @test stored_length(d) == 2 * 3 + @test storedlength(d) == 2 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -360,8 +364,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = 2 * a @allowscalar @test Array(b) ≈ 2 * Array(a) @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 + @test blockstoredlength(b) == 2 + @test storedlength(b) == 2 * 4 + 3 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -370,8 +374,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = (2 + 3im) * a @test Array(b) ≈ (2 + 3im) * Array(a) @test eltype(b) == complex(elt) - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 + @test blockstoredlength(b) == 2 + @test storedlength(b) == 2 * 4 + 3 * 3 a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) @views for b in [Block(1, 2), Block(2, 1)] @@ -380,8 +384,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = a + a @allowscalar @test Array(b) ≈ 2 * Array(a) @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 + @test blockstoredlength(b) == 2 + @test storedlength(b) == 2 * 4 + 3 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -394,8 +398,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = a .+ a .+ 3 .* PermutedDimsArray(x, (2, 1)) @test Array(b) ≈ 2 * Array(a) + 3 * permutedims(Array(x), (2, 1)) @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 + @test blockstoredlength(b) == 2 + @test storedlength(b) == 2 * 4 + 3 * 3 a = 
BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -404,15 +408,15 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b = permutedims(a, (2, 1)) @test Array(b) ≈ permutedims(Array(a), (2, 1)) @test eltype(b) == elt - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 + @test blockstoredlength(b) == 2 + @test storedlength(b) == 2 * 4 + 3 * 3 a = dev(BlockSparseArray{elt}([1, 1, 1], [1, 2, 3], [2, 2, 1], [1, 2, 1])) a[Block(3, 2, 2, 3)] = dev(randn(elt, 1, 2, 2, 1)) perm = (2, 3, 4, 1) for b in (PermutedDimsArray(a, perm), permutedims(a, perm)) @test Array(b) == permutedims(Array(a), perm) - @test issetequal(block_stored_indices(b), [Block(2, 2, 3, 3)]) + @test issetequal(eachblockstoredindex(b), [Block(2, 2, 3, 3)]) @test @allowscalar b[Block(2, 2, 3, 3)] == permutedims(a[Block(3, 2, 2, 3)], perm) end @@ -425,8 +429,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test eltype(b) == elt @test size(b) == size(a) @test blocksize(b) == (2, 2) - @test block_stored_length(b) == 2 - @test stored_length(b) == 2 * 4 + 3 * 3 + @test blockstoredlength(b) == 2 + @test storedlength(b) == 2 * 4 + 3 * 3 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -439,8 +443,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b[Block(2, 2)] == a[Block(1, 1)] @test size(b) == size(a) @test blocksize(b) == (2, 2) - @test stored_length(b) == stored_length(a) - @test block_stored_length(b) == 2 + @test storedlength(b) == storedlength(a) + @test blockstoredlength(b) == 2 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -450,8 +454,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b == a @test size(b) == size(a) @test blocksize(b) == (2, 2) - @test stored_length(b) == stored_length(a) - @test block_stored_length(b) == 2 + @test storedlength(b) == storedlength(a) + @test blockstoredlength(b) == 2 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -463,8 +467,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b[Block(1, 2)] == a[Block(1, 2)] @test size(b) == (2, 7) @test blocksize(b) == (1, 2) - @test stored_length(b) == stored_length(a[Block(1, 2)]) - @test block_stored_length(b) == 1 + @test storedlength(b) == storedlength(a[Block(1, 2)]) + @test blockstoredlength(b) == 1 a = dev(BlockSparseArray{elt}(undef, ([2, 3], [3, 4]))) @views for b in [Block(1, 2), Block(2, 1)] @@ -474,8 +478,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @allowscalar @test b == Array(a)[2:4, 2:4] @test size(b) == (3, 3) @test blocksize(b) == (2, 2) - @test stored_length(b) == 1 * 1 + 2 * 2 - @test block_stored_length(b) == 2 + @test storedlength(b) == 1 * 1 + 2 * 2 + @test blockstoredlength(b) == 2 for f in (getindex, view) # TODO: Broken on GPU. 
@allowscalar begin @@ -499,18 +503,18 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b == Array(a)[3:4, 2:3] @test size(b) == (2, 2) @test blocksize(b) == (1, 1) - @test stored_length(b) == 2 * 2 - @test block_stored_length(b) == 1 + @test storedlength(b) == 2 * 2 + @test blockstoredlength(b) == 1 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] a[b] = randn(elt, size(a[b])) end b = PermutedDimsArray(a, (2, 1)) - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == permutedims(Array(a), (2, 1)) c = 2 * b - @test block_stored_length(c) == 2 + @test blockstoredlength(c) == 2 @test Array(c) == 2 * permutedims(Array(a), (2, 1)) a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @@ -518,10 +522,10 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = randn(elt, size(a[b])) end b = a' - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == Array(a)' c = 2 * b - @test block_stored_length(c) == 2 + @test blockstoredlength(c) == 2 @test Array(c) == 2 * Array(a)' a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @@ -529,10 +533,10 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = randn(elt, size(a[b])) end b = transpose(a) - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 @test Array(b) == transpose(Array(a)) c = 2 * b - @test block_stored_length(c) == 2 + @test blockstoredlength(c) == 2 @test Array(c) == 2 * transpose(Array(a)) a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @@ -604,7 +608,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b .= x @test a[Block(2, 2)[1:2, 2:3]] == x @test a[Block(2, 2)[1:2, 2:3]] == b - @test block_stored_length(a) == 1 + @test blockstoredlength(a) == 1 a = BlockSparseArray{elt}([2, 3], [2, 3]) @views for b in [Block(1, 1), Block(2, 2)] @@ -644,7 +648,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a[b] = randn(elt, size(a[b])) end b = a[Block(2):Block(2), Block(1):Block(2)] - @test block_stored_length(b) == 1 + @test blockstoredlength(b) == 1 @test b == Array(a)[3:5, 1:end] a = BlockSparseArray{elt}(undef, ([2, 3, 4], [2, 3, 4])) @@ -657,8 +661,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype ([Block(2)[2:3], Block(3)[1:3]], [Block(2)[2:3], Block(3)[2:3]]), ) for b in (a[I1, I2], @view(a[I1, I2])) - # TODO: Rename `block_stored_length`. - @test block_stored_length(b) == 2 + # TODO: Rename `blockstoredlength`. + @test blockstoredlength(b) == 2 @test b[Block(1, 1)] == a[Block(2, 2)[2:3, 2:3]] @test b[Block(2, 2)] == a[Block(3, 3)[1:3, 2:3]] end @@ -676,9 +680,9 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test b[Block(1, 2)] == a[Block(1, 2)][:, 1:2] @test b[Block(2, 2)] == a[Block(2, 2)][:, 1:2] @test blocklengths.(axes(b)) == ([3, 3], [2, 2]) - # TODO: Rename `block_stored_length`. + # TODO: Rename `blockstoredlength`. 
@test blocksize(b) == (2, 2) - @test block_stored_length(b) == 2 + @test blockstoredlength(b) == 2 a = BlockSparseArray{elt}(undef, ([2, 3], [3, 4])) @views for b in [Block(1, 2), Block(2, 1)] @@ -709,31 +713,45 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a = BlockSparseArray{elt}([2, 3], [3, 4]) @test iszero(a) - @test iszero(block_stored_length(a)) + @test iszero(blockstoredlength(a)) fill!(a, 0) @test iszero(a) - @test iszero(block_stored_length(a)) + @test iszero(blockstoredlength(a)) fill!(a, 2) @test !iszero(a) @test all(==(2), a) - @test block_stored_length(a) == 4 + @test blockstoredlength(a) == 4 fill!(a, 0) @test iszero(a) - @test iszero(block_stored_length(a)) + @test iszero(blockstoredlength(a)) + + a = BlockSparseArray{elt}([2, 3], [3, 4]) + @test iszero(a) + @test iszero(blockstoredlength(a)) + zero!(a) + @test iszero(a) + @test iszero(blockstoredlength(a)) + fill!(a, 2) + @test !iszero(a) + @test all(==(2), a) + @test blockstoredlength(a) == 4 + zero!(a) + @test iszero(a) + @test iszero(blockstoredlength(a)) a = BlockSparseArray{elt}([2, 3], [3, 4]) @test iszero(a) - @test iszero(block_stored_length(a)) + @test iszero(blockstoredlength(a)) a .= 0 @test iszero(a) - @test iszero(block_stored_length(a)) + @test iszero(blockstoredlength(a)) a .= 2 @test !iszero(a) @test all(==(2), a) - @test block_stored_length(a) == 4 + @test blockstoredlength(a) == 4 a .= 0 @test iszero(a) - @test iszero(block_stored_length(a)) + @test iszero(blockstoredlength(a)) # TODO: Broken on GPU. a = BlockSparseArray{elt}([2, 3], [3, 4]) @@ -772,13 +790,13 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype for abx in (f1(), f2()) (; a, b, x) = abx @test b isa SubArray{<:Any,<:Any,<:BlockSparseArray} - @test block_stored_length(b) == 1 + @test blockstoredlength(b) == 1 @test b[Block(1, 1)] == x @test @view(b[Block(1, 1)]) isa Matrix{elt} for blck in [Block(2, 1), Block(1, 2), Block(2, 2)] @test iszero(b[blck]) end - @test block_stored_length(a) == 1 + @test blockstoredlength(a) == 1 @test a[Block(2, 2)] == x for blck in [Block(1, 1), Block(2, 1), Block(1, 2)] @test iszero(a[blck]) @@ -794,7 +812,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype b .= x @test b == x @test a[Block(1, 2)] == x - @test block_stored_length(a) == 1 + @test blockstoredlength(a) == 1 a = BlockSparseArray{elt}([4, 3, 2], [4, 3, 2]) @views for B in [Block(1, 1), Block(2, 2), Block(3, 3)] @@ -805,7 +823,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype c = @view b[4:8, 4:8] @test c isa SubArray{<:Any,<:Any,<:BlockSparseArray} @test size(c) == (5, 5) - @test block_stored_length(c) == 2 + @test blockstoredlength(c) == 2 @test blocksize(c) == (2, 2) @test blocklengths.(axes(c)) == ([2, 3], [2, 3]) @test size(c[Block(1, 1)]) == (2, 2) @@ -952,7 +970,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a_dest = a1 * a2 @allowscalar @test Array(a_dest) ≈ Array(a1) * Array(a2) @test a_dest isa BlockSparseArray{elt} - @test block_stored_length(a_dest) == 1 + @test blockstoredlength(a_dest) == 1 end @testset "Matrix multiplication" begin a1 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) @@ -983,23 +1001,23 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a2[Block(1, 2)] = dev(randn(elt, size(@view(a2[Block(1, 2)])))) a_dest = cat(a1, a2; dims=1) - @test block_stored_length(a_dest) == 2 + @test blockstoredlength(a_dest) == 2 @test blocklengths.(axes(a_dest)) == ([2, 3, 2, 3], [2, 3]) - @test issetequal(block_stored_indices(a_dest), [Block(2, 1), 
Block(3, 2)]) + @test issetequal(eachblockstoredindex(a_dest), [Block(2, 1), Block(3, 2)]) @test a_dest[Block(2, 1)] == a1[Block(2, 1)] @test a_dest[Block(3, 2)] == a2[Block(1, 2)] a_dest = cat(a1, a2; dims=2) - @test block_stored_length(a_dest) == 2 + @test blockstoredlength(a_dest) == 2 @test blocklengths.(axes(a_dest)) == ([2, 3], [2, 3, 2, 3]) - @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(1, 4)]) + @test issetequal(eachblockstoredindex(a_dest), [Block(2, 1), Block(1, 4)]) @test a_dest[Block(2, 1)] == a1[Block(2, 1)] @test a_dest[Block(1, 4)] == a2[Block(1, 2)] a_dest = cat(a1, a2; dims=(1, 2)) - @test block_stored_length(a_dest) == 2 + @test blockstoredlength(a_dest) == 2 @test blocklengths.(axes(a_dest)) == ([2, 3, 2, 3], [2, 3, 2, 3]) - @test issetequal(block_stored_indices(a_dest), [Block(2, 1), Block(3, 4)]) + @test issetequal(eachblockstoredindex(a_dest), [Block(2, 1), Block(3, 4)]) @test a_dest[Block(2, 1)] == a1[Block(2, 1)] @test a_dest[Block(3, 4)] == a2[Block(1, 2)] end @@ -1009,7 +1027,7 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype a2 = dev(BlockSparseArray{elt}([2, 3], [2, 3])) a2[Block(1, 1)] = dev(randn(elt, size(@view(a1[Block(1, 1)])))) # TODO: Make this work, requires customization of `TensorAlgebra.fusedims` and - # `TensorAlgebra.splitdims` in terms of `BlockSparseArrays.block_reshape`, + # `TensorAlgebra.splitdims` in terms of `BlockSparseArrays.blockreshape`, # and customization of `TensorAlgebra.:⊗` in terms of `GradedUnitRanges.tensor_product`. a_dest, dimnames_dest = contract(a1, (1, -1), a2, (-1, 2)) @allowscalar begin @@ -1017,15 +1035,14 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype @test a_dest ≈ a_dest_dense end end - @testset "block_reshape" begin + @testset "blockreshape" begin a = dev(BlockSparseArray{elt}(undef, ([3, 4], [2, 3]))) a[Block(1, 2)] = dev(randn(elt, size(@view(a[Block(1, 2)])))) a[Block(2, 1)] = dev(randn(elt, size(@view(a[Block(2, 1)])))) - b = block_reshape(a, [6, 8, 9, 12]) + b = blockreshape(a, [6, 8, 9, 12]) @test reshape(a[Block(1, 2)], 9) == b[Block(3)] @test reshape(a[Block(2, 1)], 8) == b[Block(2)] - @test block_stored_length(b) == 2 - @test stored_length(b) == 17 + @test blockstoredlength(b) == 2 + @test storedlength(b) == 17 end end -end
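To close, an aside (not part of the diff) sketching the renamed `blockreshape`, mirroring the final test above; the element type and the use of `ones` are illustrative.
```julia
using BlockArrays: Block
using BlockSparseArrays: BlockSparseArray, blockreshape, blockstoredlength

a = BlockSparseArray{Float64}(undef, ([3, 4], [2, 3]))
a[Block(1, 2)] = ones(3, 3)
a[Block(2, 1)] = ones(4, 2)

# Reshape block-by-block onto a blocked vector with block lengths [6, 8, 9, 12]:
b = blockreshape(a, [6, 8, 9, 12])
b[Block(3)] == reshape(a[Block(1, 2)], 9)  # expected true
b[Block(2)] == reshape(a[Block(2, 1)], 8)  # expected true
blockstoredlength(b)                       # expected 2
```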