Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion Project.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
name = "BlockTensorKit"
uuid = "5f87ffc2-9cf1-4a46-8172-465d160bd8cd"
version = "0.3.6"
version = "0.3.7"
authors = ["Lukas Devos <ldevos98@gmail.com> and contributors"]

[deps]
Expand Down
21 changes: 16 additions & 5 deletions src/auxiliary/sparsetensorarray.jl
Original file line number Diff line number Diff line change
Expand Up @@ -35,33 +35,44 @@ Base.keys(A::SparseTensorArray) = keys(A.data)
Base.values(A::SparseTensorArray) = values(A.data)

TensorKit.space(A::SparseTensorArray) = A.space
# Forward codomain/domain queries to the total space stored in the array.
TensorKit.codomain(A::SparseTensorArray) = codomain(space(A))
TensorKit.domain(A::SparseTensorArray) = domain(space(A))

# Number of outgoing/incoming tensor indices, defined both for instances and
# for `SparseTensorArray` types. At the type level the information is carried
# by the element type of `T` itself.
# Fix: the type-level methods previously referenced `A`, which is not in scope
# there (only the type parameter `T` is), so calling them raised `UndefVarError`.
TensorKit.numout(A::SparseTensorArray) = numout(eltype(A))
TensorKit.numout(::Type{T}) where {T <: SparseTensorArray} = numout(eltype(T))
TensorKit.numin(A::SparseTensorArray) = numin(eltype(A))
TensorKit.numin(::Type{T}) where {T <: SparseTensorArray} = numin(eltype(T))

# AbstractArray interface
# -----------------------
Base.size(A::SparseTensorArray) = ntuple(i -> length(space(A)[i]), ndims(A))
# `size` of a `SparseTensorArray`: one entry per tensor index, delegating to
# the per-dimension method below.
Base.size(A::SparseTensorArray) = ntuple(Base.Fix1(size, A), ndims(A))
function Base.size(A::SparseTensorArray, i::Int)
# The first `numout(A)` dimensions index into the codomain; the remaining
# dimensions index into the domain (shifted by `numout(A)`).
1 ≤ i ≤ ndims(A) || throw(ArgumentError("Invalid number of dimensions"))
return i <= numout(A) ? length(codomain(A)[i]) : length(domain(A)[i - numout(A)])
end

@inline function Base.getindex(
# Scalar `getindex`: return the tensor stored at index `I` if present;
# otherwise return a freshly allocated zero tensor of the space belonging to
# that index slot. The zero tensor is NOT inserted into `A.data` — use
# `getindex!` for the inserting variant. Bounds checks can be elided by the
# caller via `@inbounds` thanks to `@propagate_inbounds`.
@propagate_inbounds function Base.getindex(
A::SparseTensorArray{S, N₁, N₂, T, N}, I::Vararg{Int, N}
) where {S, N₁, N₂, T, N}
@boundscheck checkbounds(A, I...)
return @inbounds get(A.data, CartesianIndex(I)) do
# default value: zero-filled tensor with the space required at this index
return fill!(similar(T, eachspace(A)[I...]), zero(scalartype(T)))
end
end
@inline function getindex!(
# Mutating variant of `getindex`: like `getindex`, but when no entry exists
# at `I`, the newly allocated zero tensor IS inserted into `A.data` (via
# `get!`) before being returned, so subsequent accesses see the same object.
@propagate_inbounds function getindex!(
A::SparseTensorArray{S, N₁, N₂, T, N}, I::CartesianIndex{N}
) where {S, N₁, N₂, T, N}
@boundscheck checkbounds(A, I)
return @inbounds get!(A.data, I) do
# default value: zero-filled tensor with the space required at this index
return fill!(similar(T, eachspace(A)[I]), zero(scalartype(T)))
end
end
@inline function getindex!(
# Convenience overload: accept plain integer indices and forward to the
# `CartesianIndex` method of `getindex!`, preserving inbounds propagation.
Base.@propagate_inbounds function getindex!(
    A::SparseTensorArray{S, N₁, N₂, T, N}, idxs::Vararg{Int, N}
) where {S, N₁, N₂, T, N}
    cart = CartesianIndex(idxs)
    return getindex!(A, cart)
end
@inline function Base.setindex!(
@propagate_inbounds function Base.setindex!(
A::SparseTensorArray{S, N₁, N₂, T, N}, v, I::Vararg{Int, N}
) where {S, N₁, N₂, T, N}
@boundscheck begin
Expand Down
12 changes: 6 additions & 6 deletions src/tensors/abstractblocktensor/abstractarray.jl
Original file line number Diff line number Diff line change
Expand Up @@ -80,13 +80,13 @@ function checkspaces(t::AbstractBlockTensorMap)
end

# scalar indexing is dispatched through:
@inline Base.getindex(t::AbstractBlockTensorMap, I::Vararg{Int, N}) where {N} =
@propagate_inbounds Base.getindex(t::AbstractBlockTensorMap, I::Vararg{Int, N}) where {N} =
getindex(parent(t), I...)
@inline Base.getindex(t::AbstractBlockTensorMap, I::CartesianIndex{N}) where {N} =
@propagate_inbounds Base.getindex(t::AbstractBlockTensorMap, I::CartesianIndex{N}) where {N} =
getindex(parent(t), I)
@inline getindex!(t::AbstractBlockTensorMap, I::Vararg{Int, N}) where {N} =
@propagate_inbounds getindex!(t::AbstractBlockTensorMap, I::Vararg{Int, N}) where {N} =
getindex!(parent(t), I...)
@inline getindex!(t::AbstractBlockTensorMap, I::CartesianIndex{N}) where {N} =
@propagate_inbounds getindex!(t::AbstractBlockTensorMap, I::CartesianIndex{N}) where {N} =
getindex!(parent(t), I)

# slicing getindex needs to correctly allocate output blocktensor:
Expand Down Expand Up @@ -115,7 +115,7 @@ Base.@propagate_inbounds function Base.getindex(
end

# disambiguate:
Base.@propagate_inbounds function Base.getindex(
@propagate_inbounds function Base.getindex(
t::AbstractBlockTensorMap, indices::Vararg{Strided.SliceIndex}
)
V = space(eachspace(t)[indices...])
Expand All @@ -138,7 +138,7 @@ Base.@propagate_inbounds function Base.getindex(
end

# TODO: check if this fallback is fair
@inline Base.setindex!(t::AbstractBlockTensorMap, v::AbstractTensorMap, args...) = (
# Fallback scalar `setindex!`: delegate storage to the parent array, then
# return `t` itself, following the Julia convention that `setindex!` returns
# the container.
@propagate_inbounds Base.setindex!(t::AbstractBlockTensorMap, v::AbstractTensorMap, args...) = (
setindex!(parent(t), v, args...); t
)

Expand Down
12 changes: 6 additions & 6 deletions src/tensors/indexmanipulations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ function TK.add_transform!(
end
scale!(tdst, β)
p = (p₁..., p₂...)
for (I, v) in nonzero_pairs(tsrc)
@inbounds for (I, v) in nonzero_pairs(tsrc)
I′ = CartesianIndex(TT.getindices(I.I, p))
tdst[I′] = TK.add_transform!(
tdst[I′], v, (p₁, p₂), fusiontreetransform, α, One(), backend...
Expand All @@ -55,7 +55,7 @@ function TK.add_transform!(
end
scale!(tdst, β)
p = (p₁..., p₂...)
for (I, v) in nonzero_pairs(tsrc)
@inbounds for (I, v) in nonzero_pairs(tsrc)
I′ = CartesianIndex(TT.getindices(I.I, p))
tdst[I′] = TK.add_transform!(
tdst[I′], v, (p₁, p₂), fusiontreetransform, α, One(), backend...
Expand Down Expand Up @@ -121,7 +121,7 @@ for f! in (:add_permute!, :add_transpose!)
end
scale!(tdst, β)
p = (p₁..., p₂...)
for (I, v) in nonzero_pairs(tsrc)
@inbounds for (I, v) in nonzero_pairs(tsrc)
I′ = CartesianIndex(TT.getindices(I.I, p))
tdst[I′] = TK.$f!(tdst[I′], v, (p₁, p₂), α, One(), backend...)
end
Expand All @@ -140,7 +140,7 @@ for f! in (:add_permute!, :add_transpose!)
end
scale!(tdst, β)
p = (p₁..., p₂...)
for (I, v) in nonzero_pairs(tsrc)
@inbounds for (I, v) in nonzero_pairs(tsrc)
I′ = CartesianIndex(TT.getindices(I.I, p))
tdst[I′] = TK.$f!(tdst[I′], v, (p₁, p₂), α, One(), backend...)
end
Expand Down Expand Up @@ -193,7 +193,7 @@ function TK.add_braid!(
end
scale!(tdst, β)
p = (p₁..., p₂...)
for (I, v) in nonzero_pairs(tsrc)
@inbounds for (I, v) in nonzero_pairs(tsrc)
I′ = CartesianIndex(TT.getindices(I.I, p))
tdst[I′] = TK.add_braid!(tdst[I′], v, (p₁, p₂), levels, α, One(), backend...)
end
Expand All @@ -213,7 +213,7 @@ function TK.add_braid!(
end
scale!(tdst, β)
p = (p₁..., p₂...)
for (I, v) in nonzero_pairs(tsrc)
@inbounds for (I, v) in nonzero_pairs(tsrc)
I′ = CartesianIndex(TT.getindices(I.I, p))
tdst[I′] = TK.add_braid!(tdst[I′], v, (p₁, p₂), levels, α, One(), backend...)
end
Expand Down
37 changes: 1 addition & 36 deletions src/tensors/tensoroperations.jl
Original file line number Diff line number Diff line change
Expand Up @@ -36,50 +36,15 @@ for TTA in (:AbstractTensorMap, :AbstractBlockTensorMap), TTB in (:AbstractTenso
end
end

function TO.tensoralloc_contract(
TC,
A::AbstractBlockTensorMap, pA::Index2Tuple, conjA::Bool,
B::AbstractBlockTensorMap, pB::Index2Tuple, conjB::Bool,
pAB::Index2Tuple,
istemp::Val = Val(false),
allocator = TO.DefaultAllocator(),
)
ttype = TO.tensorcontract_type(TC, A, pA, conjA, B, pB, conjB, pAB)
structure = TO.tensorcontract_structure(A, pA, conjA, B, pB, conjB, pAB)
TT = eltype(ttype)

if isabstracttype(TT)
# do not allocate, use undef allocator
E, S, N1, N2 = scalartype(TT), spacetype(TT), numout(structure), numin(structure)
if issparse(A) && issparse(B)
return SparseBlockTensorMap{AbstractTensorMap{E, S, N1, N2}}(
undef, codomain(structure), domain(structure)
)
else
return BlockTensorMap{AbstractTensorMap{E, S, N1, N2}}(
undef, codomain(structure), domain(structure)
)
end
else
return tensoralloc(ttype, structure, istemp, allocator)
end
end

"""
    promote_blocktype(TT, A₁, A₂)

Return the block-storage array type obtained by calling `similar` on either input
array type `A₁`/`A₂` with block element type `TT`. Throws an `ArgumentError` when
the two array types disagree on the resulting block type.
"""
function promote_blocktype(::Type{TT}, ::Type{A₁}, ::Type{A₂}) where {TT, A₁, A₂}
    BT = similarblocktype(A₁, TT)
    # Explicit check instead of `@assert`: assertions may be disabled at higher
    # optimization levels and should not guard a validity condition.
    BT === similarblocktype(A₂, TT) || throw(ArgumentError("incompatible block types"))
    return BT
end

# Infer the array type that `similar(::A, TT, dims)` would produce, without
# constructing anything.
# NOTE(review): relies on `Core.Compiler.return_type`, an internal API whose
# inference result is not guaranteed to be stable across Julia versions —
# confirm this dependency is acceptable.
function similarblocktype(::Type{A}, ::Type{TT}) where {A, TT}
return Core.Compiler.return_type(similar, Tuple{A, Type{TT}, NTuple{numind(TT), Int}})
end

# By default, make "dense" allocations
function TO.tensoralloc(
::Type{BT}, structure::TensorMapSumSpace, istemp::Val, allocator = TO.DefaultAllocator()
) where {BT <: AbstractBlockTensorMap}
C = BT(undef_blocks, structure)
issparse(C) && return C # don't fill up sparse blocks
blockallocator(V) = TO.tensoralloc(eltype(C), V, istemp, allocator)
map!(blockallocator, parent(C), eachspace(C))
return C
Expand Down