4 changes: 1 addition & 3 deletions NDTensors/Project.toml
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman <[email protected]>"]
version = "0.4.12"
version = "0.4.13"

[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -28,7 +28,6 @@ Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
StridedViews = "4db3bf67-4bd7-4b4e-b153-31dc3fb37143"
TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"
TypeParameterAccessors = "7e5a90cf-f82e-492e-a09b-e3e26432c138"
VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8"

[weakdeps]
@@ -88,7 +87,6 @@ StridedViews = "0.2.2, 0.3, 0.4"
TBLIS = "0.2"
TimerOutputs = "0.5.5"
TupleTools = "1.2.0"
TypeParameterAccessors = "0.3"
VectorInterface = "0.4.2, 0.5"
cuTENSOR = "2"
julia = "1.10"
5 changes: 5 additions & 0 deletions NDTensors/src/NDTensors.jl
@@ -1,4 +1,9 @@
module NDTensors

module Vendored
include(joinpath("vendored", "TypeParameterAccessors", "src", "TypeParameterAccessors.jl"))
end

#####################################
# Imports and exports
#
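The hunk above shows the vendoring pattern: the vendored package's entry file is simply `include`d inside a `Vendored` submodule and then loaded with a relative `using`. A minimal sketch of the same idea, with a hypothetical dependency `MyDep` standing in for TypeParameterAccessors:

```julia
module MyPackage

# Vendored dependencies live under a `Vendored` submodule; the include path
# mirrors the vendored package's own layout (src/MyDep.jl defines `module MyDep`).
module Vendored
include(joinpath("vendored", "MyDep", "src", "MyDep.jl"))
end

# Load the vendored copy with a relative `using` instead of the registered package.
using .Vendored.MyDep: some_function

end
```

This leaves call sites unchanged apart from the `.Vendored.` prefix, which is exactly what the remaining hunks in this diff apply.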
48 changes: 24 additions & 24 deletions NDTensors/src/abstractarray/generic_array_constructors.jl
@@ -1,8 +1,8 @@
using TypeParameterAccessors:
unwrap_array_type,
specify_default_type_parameters,
specify_type_parameters,
type_parameters
using .Vendored.TypeParameterAccessors:
unwrap_array_type,
specify_default_type_parameters,
specify_type_parameters,
type_parameters

# Convert to Array, avoiding copying if possible
array(a::AbstractArray) = a
@@ -12,33 +12,33 @@ vector(a::AbstractVector) = a
## Warning to use these functions it is necessary to define `TypeParameterAccessors.position(::Type{<:YourArrayType}, ::typeof(ndims)))`
# Implementation, catches if `ndims(arraytype) != length(dims)`.
## TODO convert ndims to `type_parameters(::, typeof(ndims))`
function generic_randn(arraytype::Type{<:AbstractArray}, dims...; rng=Random.default_rng())
arraytype_specified = specify_type_parameters(
unwrap_array_type(arraytype), ndims, length(dims)
)
arraytype_specified = specify_default_type_parameters(arraytype_specified)
@assert length(dims) == ndims(arraytype_specified)
data = similar(arraytype_specified, dims...)
return randn!(rng, data)
function generic_randn(arraytype::Type{<:AbstractArray}, dims...; rng = Random.default_rng())
arraytype_specified = specify_type_parameters(
unwrap_array_type(arraytype), ndims, length(dims)
)
arraytype_specified = specify_default_type_parameters(arraytype_specified)
@assert length(dims) == ndims(arraytype_specified)
data = similar(arraytype_specified, dims...)
return randn!(rng, data)
end

function generic_randn(
arraytype::Type{<:AbstractArray}, dims::Tuple; rng=Random.default_rng()
)
return generic_randn(arraytype, dims...; rng)
arraytype::Type{<:AbstractArray}, dims::Tuple; rng = Random.default_rng()
)
return generic_randn(arraytype, dims...; rng)
end

# Implementation, catches if `ndims(arraytype) != length(dims)`.
function generic_zeros(arraytype::Type{<:AbstractArray}, dims...)
arraytype_specified = specify_type_parameters(
unwrap_array_type(arraytype), ndims, length(dims)
)
arraytype_specified = specify_default_type_parameters(arraytype_specified)
@assert length(dims) == ndims(arraytype_specified)
ElT = eltype(arraytype_specified)
return fill!(similar(arraytype_specified, dims...), zero(ElT))
arraytype_specified = specify_type_parameters(
unwrap_array_type(arraytype), ndims, length(dims)
)
arraytype_specified = specify_default_type_parameters(arraytype_specified)
@assert length(dims) == ndims(arraytype_specified)
ElT = eltype(arraytype_specified)
return fill!(similar(arraytype_specified, dims...), zero(ElT))
end

function generic_zeros(arraytype::Type{<:AbstractArray}, dims::Tuple)
return generic_zeros(arraytype, dims...)
return generic_zeros(arraytype, dims...)
end
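As a usage sketch (not part of the diff), these constructors can be called roughly as follows, assuming they are reached as `NDTensors.generic_randn`/`NDTensors.generic_zeros` and that unspecified type parameters default the way `specify_default_type_parameters` resolves them for `Array` (eltype `Float64`):

```julia
using NDTensors: NDTensors
using Random

# Allocate a 2×3 array and fill it with random normals; the element type and
# dimensionality are filled in from the (partially specified) array type.
A = NDTensors.generic_randn(Array, 2, 3; rng = Random.MersenneTwister(1234))

# Same idea for zeros; here the element type is already specified by the caller.
B = NDTensors.generic_zeros(Vector{Float32}, 4)
```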
4 changes: 2 additions & 2 deletions NDTensors/src/abstractarray/iscu.jl
@@ -1,7 +1,7 @@
using TypeParameterAccessors: unwrap_array_type
using .Vendored.TypeParameterAccessors: unwrap_array_type
# TODO: Make `isgpu`, `ismtl`, etc.
# For `isgpu`, will require a `NDTensorsGPUArrayCoreExt`.
iscu(A::AbstractArray) = iscu(typeof(A))
function iscu(A::Type{<:AbstractArray})
return (unwrap_array_type(A) == A ? false : iscu(unwrap_array_type(A)))
return (unwrap_array_type(A) == A ? false : iscu(unwrap_array_type(A)))
end
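A brief sketch of the recursion, assuming `unwrap_array_type` peels wrapper types (such as `Adjoint`) down to the underlying storage type and a GPU package extension supplies the `true` case:

```julia
using NDTensors: NDTensors

# For a plain CPU array the unwrapped type equals the input, so the recursion
# terminates immediately with `false`.
NDTensors.iscu(Matrix{Float64})  # -> false

# For a wrapper around a GPU array, `unwrap_array_type` would return the GPU
# storage type and the call recurses into the method a GPU extension defines.
```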
4 changes: 2 additions & 2 deletions NDTensors/src/abstractarray/set_types.jl
@@ -1,4 +1,4 @@
using TypeParameterAccessors: TypeParameterAccessors
using .Vendored.TypeParameterAccessors: TypeParameterAccessors

"""
# Do we still want to define things like this?
@@ -13,5 +13,5 @@ TODO: Use `Accessors.jl` notation:
# `FillArray` instead. This is a stand-in
# to make things work with the current design.
function TypeParameterAccessors.set_ndims(numbertype::Type{<:Number}, ndims)
return numbertype
return numbertype
end
28 changes: 14 additions & 14 deletions NDTensors/src/abstractarray/similar.jl
@@ -1,42 +1,42 @@
using Base: DimOrInd, Dims, OneTo
using TypeParameterAccessors: IsWrappedArray, unwrap_array_type, set_eltype, similartype
using .Vendored.TypeParameterAccessors: IsWrappedArray, unwrap_array_type, set_eltype, similartype

## Custom `NDTensors.similar` implementation.
## More extensive than `Base.similar`.

# This function actually allocates the data.
# NDTensors.similar
function similar(arraytype::Type{<:AbstractArray}, dims::Tuple)
shape = NDTensors.to_shape(arraytype, dims)
return similartype(arraytype, shape)(undef, NDTensors.to_shape(arraytype, shape))
shape = NDTensors.to_shape(arraytype, dims)
return similartype(arraytype, shape)(undef, NDTensors.to_shape(arraytype, shape))
end

# This function actually allocates the data.
# Catches conversions of dimensions specified by ranges
# dimensions specified by integers with `Base.to_shape`.
# NDTensors.similar
function similar(arraytype::Type{<:AbstractArray}, dims::Dims)
return similartype(arraytype, dims)(undef, dims)
return similartype(arraytype, dims)(undef, dims)
end

# NDTensors.similar
function similar(arraytype::Type{<:AbstractArray}, dims::DimOrInd...)
return similar(arraytype, NDTensors.to_shape(dims))
return similar(arraytype, NDTensors.to_shape(dims))
end

# Handles range inputs, `Base.to_shape` converts them to integer dimensions.
# See Julia's `base/abstractarray.jl`.
# NDTensors.similar
function similar(
arraytype::Type{<:AbstractArray},
shape::Tuple{Union{Integer,OneTo},Vararg{Union{Integer,OneTo}}},
)
return NDTensors.similar(arraytype, NDTensors.to_shape(shape))
arraytype::Type{<:AbstractArray},
shape::Tuple{Union{Integer, OneTo}, Vararg{Union{Integer, OneTo}}},
)
return NDTensors.similar(arraytype, NDTensors.to_shape(shape))
end

# NDTensors.similar
function similar(arraytype::Type{<:AbstractArray}, eltype::Type, dims::Tuple)
return NDTensors.similar(similartype(arraytype, eltype, dims), dims)
return NDTensors.similar(similartype(arraytype, eltype, dims), dims)
end

# TODO: Add an input `structure` which can store things like the nonzero
@@ -70,19 +70,19 @@ end
# TODO: Maybe makes an empty array, i.e. `similartype(arraytype, eltype)()`?
# NDTensors.similar
function similar(arraytype::Type{<:AbstractArray}, eltype::Type)
return error("Must specify dimensions.")
return error("Must specify dimensions.")
end

## NDTensors.similar for instances

# NDTensors.similar
function similar(array::AbstractArray, eltype::Type, dims::Tuple)
return NDTensors.similar(similartype(typeof(array), eltype), dims)
return NDTensors.similar(similartype(typeof(array), eltype), dims)
end

# NDTensors.similar
function similar(array::AbstractArray, eltype::Type, dims::Int)
return NDTensors.similar(similartype(typeof(array), eltype), dims)
return NDTensors.similar(similartype(typeof(array), eltype), dims)
end

# NDTensors.similar
@@ -91,7 +91,7 @@ similar(array::AbstractArray, dims::Tuple) = NDTensors.similar(typeof(array), di
# Use the `size` to determine the dimensions
# NDTensors.similar
function similar(array::AbstractArray, eltype::Type)
return NDTensors.similar(typeof(array), eltype, size(array))
return NDTensors.similar(typeof(array), eltype, size(array))
end

# Use the `size` to determine the dimensions
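As a usage sketch of the overloads shown above (assuming `similartype` fills in the element type and dimensions as the comments describe):

```julia
using NDTensors: NDTensors

# Allocate from an array type plus dimensions.
a = NDTensors.similar(Matrix{Float64}, (2, 3))

# Allocate from an array type, overriding the element type.
b = NDTensors.similar(Matrix{Float64}, Float32, (2, 3))

# Instance version: keep the dimensions from `size(a)` but change the element type.
c = NDTensors.similar(a, ComplexF64)
```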
8 changes: 4 additions & 4 deletions NDTensors/src/adapt.jl
@@ -3,7 +3,7 @@ adapt_structure(to, x::TensorStorage) = setdata(x, adapt(to, data(x)))
adapt_structure(to, x::Tensor) = setstorage(x, adapt(to, storage(x)))

function GPUArraysCoreExtensions.cpu(eltype::Type{<:Number}, x)
return fmap(x -> adapt(Array{eltype}, x), x)
return fmap(x -> adapt(Array{eltype}, x), x)
end
GPUArraysCoreExtensions.cpu(x) = fmap(x -> adapt(Array, x), x)

@@ -27,11 +27,11 @@ double_precision(x) = fmap(x -> adapt(double_precision(eltype(x)), x), x)
# Used to adapt `EmptyStorage` types
#

using TypeParameterAccessors: specify_type_parameters
using .Vendored.TypeParameterAccessors: specify_type_parameters
function adapt_storagetype(to::Type{<:AbstractVector}, x::Type{<:TensorStorage})
return set_datatype(x, specify_type_parameters(to, eltype, eltype(x)))
return set_datatype(x, specify_type_parameters(to, eltype, eltype(x)))
end

function adapt_storagetype(to::Type{<:AbstractArray}, x::Type{<:TensorStorage})
return set_datatype(x, specify_type_parameters(to, (ndims, eltype), (1, eltype(x))))
return set_datatype(x, specify_type_parameters(to, (ndims, eltype), (1, eltype(x))))
end
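To illustrate the `specify_type_parameters` calls used in `adapt_storagetype`, a small hypothetical example (assuming the vendored module is reachable as `NDTensors.Vendored.TypeParameterAccessors`):

```julia
using NDTensors.Vendored.TypeParameterAccessors: specify_type_parameters

# Specify a single type parameter, addressed by the function that extracts it.
specify_type_parameters(Vector, eltype, Float32)               # -> Vector{Float32}

# Specify several parameters at once, as in the `AbstractArray` method above.
specify_type_parameters(Array, (ndims, eltype), (1, Float64))  # -> Vector{Float64}
```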