From c0621b737180ef0bc36f0d10ab782986358c0028 Mon Sep 17 00:00:00 2001 From: Matt Fishman Date: Wed, 24 Apr 2024 11:01:26 -0400 Subject: [PATCH] More package extensions (#161) --- .github/workflows/CI.yml | 1 - Project.toml | 26 ++- .../ITensorNetworksGraphsFlowsExt.jl | 19 ++ ...sorNetworksOMEinsumContractionOrdersExt.jl | 185 ++++++++++++++++++ .../ITensorNetworksObserversExt.jl | 9 + src/ITensorNetworks.jl | 6 +- src/apply.jl | 1 - src/contract_approx/mincut.jl | 16 +- src/contraction_sequences.jl | 108 ---------- src/lib/ITensorsExtensions/src/itensor.jl | 1 - src/observers.jl | 7 - src/requires/omeinsumcontractionorders.jl | 91 --------- ...ontractionorders_itensorcontractiontree.jl | 154 --------------- .../alternating_update/alternating_update.jl | 5 +- .../alternating_update/region_update.jl | 14 +- src/update_observer.jl | 11 ++ test/Project.toml | 1 - test/test_belief_propagation.jl | 2 + test/test_binary_tree_partition.jl | 4 +- test/test_contract_deltas.jl | 2 + test/test_contraction_sequence.jl | 3 +- 21 files changed, 266 insertions(+), 400 deletions(-) create mode 100644 ext/ITensorNetworksGraphsFlowsExt/ITensorNetworksGraphsFlowsExt.jl create mode 100644 ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl create mode 100644 ext/ITensorNetworksObserversExt/ITensorNetworksObserversExt.jl delete mode 100644 src/observers.jl delete mode 100644 src/requires/omeinsumcontractionorders.jl delete mode 100644 src/requires/omeinsumcontractionorders_itensorcontractiontree.jl create mode 100644 src/update_observer.jl diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 905edaa7..d4f1c07d 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -18,7 +18,6 @@ jobs: fail-fast: false matrix: version: - - '1.7' - '1' os: - ubuntu-latest diff --git a/Project.toml b/Project.toml index fba18681..0c144636 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" authors = ["Matthew Fishman and contributors"] -version = "0.9.0" +version = "0.10.0" [deps] AbstractTrees = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" @@ -13,7 +13,6 @@ Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4" Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" -GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889" ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" IsApprox = "28f27b66-4bd8-47e7-9110-e2746eb8bed7" IterTools = "c8e1da08-722c-5040-9ed9-7db0dc04731e" @@ -21,11 +20,9 @@ KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" NamedGraphs = "678767b0-92e7-4007-89e4-4527a8725b19" -Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" PackageExtensionCompat = "65ce6f38-6b18-4e1d-a461-8949797d7930" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" -Requires = "ae029012-a4dd-5104-9daa-d747884805df" SerializedElementArrays = "d3ce8812-9567-47e9-a7b5-65a6d70a3065" SimpleTraits = "699a6c99-e7fa-54fc-8d76-47d257e15c1d" SparseArrayKit = "a9a3c162-d163-4c15-8926-b8794fbefed2" @@ -38,9 +35,15 @@ TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6" [weakdeps] EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5" +GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889" +Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" 
+OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" [extensions] ITensorNetworksEinExprsExt = "EinExprs" +ITensorNetworksGraphsFlowsExt = "GraphsFlows" +ITensorNetworksObserversExt = "Observers" +ITensorNetworksOMEinsumContractionOrdersExt = "OMEinsumContractionOrders" [compat] AbstractTrees = "0.4.4" @@ -50,32 +53,35 @@ DataGraphs = "0.2.2" DataStructures = "0.18" Dictionaries = "0.4" Distributions = "0.25.86" -DocStringExtensions = "0.8, 0.9" +DocStringExtensions = "0.9" EinExprs = "0.6.4" Graphs = "1.8" GraphsFlows = "0.1.1" -ITensors = "0.3.58, 0.4" +ITensors = "0.4" IsApprox = "0.1" IterTools = "1.4.0" KrylovKit = "0.6, 0.7" NamedGraphs = "0.5.1" -NDTensors = "0.2, 0.3" +NDTensors = "0.3" Observers = "0.2" +OMEinsumContractionOrders = "0.8.3" PackageExtensionCompat = "1" -Requires = "1.3" SerializedElementArrays = "0.1" SimpleTraits = "0.9" -SparseArrayKit = "0.2.1, 0.3" +SparseArrayKit = "0.3" SplitApplyCombine = "1.2" StaticArrays = "1.5.12" StructWalk = "0.2" Suppressor = "0.2" TimerOutputs = "0.5.22" TupleTools = "1.4" -julia = "1.7" +julia = "1.10" [extras] EinExprs = "b1794770-133b-4de1-afb4-526377e9f4c5" +GraphsFlows = "06909019-6f44-4949-96fc-b9d9aaa02889" +Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0" +OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] diff --git a/ext/ITensorNetworksGraphsFlowsExt/ITensorNetworksGraphsFlowsExt.jl b/ext/ITensorNetworksGraphsFlowsExt/ITensorNetworksGraphsFlowsExt.jl new file mode 100644 index 00000000..dfe9fc51 --- /dev/null +++ b/ext/ITensorNetworksGraphsFlowsExt/ITensorNetworksGraphsFlowsExt.jl @@ -0,0 +1,19 @@ +module ITensorNetworksGraphsFlowsExt +using Graphs: AbstractGraph +using GraphsFlows: GraphsFlows +using ITensorNetworks: ITensorNetworks +using NDTensors.AlgorithmSelection: @Algorithm_str + +function ITensorNetworks.mincut( + ::Algorithm"GraphsFlows", + graph::AbstractGraph, + source_vertex, + target_vertex; + capacity_matrix, + alg=GraphsFlows.PushRelabelAlgorithm(), +) + # TODO: Replace with `Backend(backend)`. + return GraphsFlows.mincut(graph, source_vertex, target_vertex, capacity_matrix, alg) +end + +end diff --git a/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl b/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl new file mode 100644 index 00000000..6511327f --- /dev/null +++ b/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl @@ -0,0 +1,185 @@ +module ITensorNetworksOMEinsumContractionOrdersExt +using DocStringExtensions: TYPEDSIGNATURES +using ITensorNetworks: ITensorNetworks +using ITensors: ITensors, Index, ITensor, inds +using NDTensors: dim +using NDTensors.AlgorithmSelection: @Algorithm_str +using OMEinsumContractionOrders: OMEinsumContractionOrders + +# OMEinsumContractionOrders wrapper for ITensors +# Slicing is not supported, because it might require extra work to slice an `ITensor` correctly. + +const ITensorList = Union{Vector{ITensor},Tuple{Vararg{ITensor}}} + +# infer the output tensor labels +# TODO: Use `symdiff` instead. 
diff --git a/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl b/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl
new file mode 100644
index 00000000..6511327f
--- /dev/null
+++ b/ext/ITensorNetworksOMEinsumContractionOrdersExt/ITensorNetworksOMEinsumContractionOrdersExt.jl
@@ -0,0 +1,185 @@
+module ITensorNetworksOMEinsumContractionOrdersExt
+using DocStringExtensions: TYPEDSIGNATURES
+using ITensorNetworks: ITensorNetworks
+using ITensors: ITensors, Index, ITensor, inds
+using NDTensors: dim
+using NDTensors.AlgorithmSelection: @Algorithm_str
+using OMEinsumContractionOrders: OMEinsumContractionOrders
+
+# OMEinsumContractionOrders wrapper for ITensors
+# Slicing is not supported, because it might require extra work to slice an `ITensor` correctly.
+
+const ITensorList = Union{Vector{ITensor},Tuple{Vararg{ITensor}}}
+
+# infer the output tensor labels
+# TODO: Use `symdiff` instead.
+function infer_output(inputs::AbstractVector{<:AbstractVector{<:Index}})
+  indslist = reduce(vcat, inputs)
+  # output indices are the ones that appear exactly once
+  iy = eltype(eltype(inputs))[]
+  for l in indslist
+    c = count(==(l), indslist)
+    if c == 1
+      push!(iy, l)
+    elseif c != 2
+      error("Each index in a tensor network must appear at most twice!")
+    end
+  end
+  return iy
+end
+
+# get a (labels, size_dict) representation of a collection of ITensors
+function rawcode(tensors::ITensorList)
+  # we use id as the label
+  indsAs = [collect(Index{Int}, ITensors.inds(A)) for A in tensors]
+  ixs = collect.(inds.(tensors))
+  unique_labels = unique(reduce(vcat, indsAs))
+  size_dict = Dict([x => dim(x) for x in unique_labels])
+  index_dict = Dict([x => x for x in unique_labels])
+  return OMEinsumContractionOrders.EinCode(ixs, infer_output(indsAs)), size_dict, index_dict
+end
+
+"""
+$(TYPEDSIGNATURES)
+Optimize the contraction order of a tensor network specified as a vector of tensors.
+Returns an `OMEinsumContractionOrders.NestedEinsum` instance.
+### Examples
+```julia
+julia> using ITensors, OMEinsumContractionOrders
+julia> i, j, k, l = Index(4), Index(5), Index(6), Index(7);
+julia> x, y, z = randomITensor(i, j), randomITensor(j, k), randomITensor(k, l);
+julia> net = optimize_contraction_nested_einsum([x, y, z]; optimizer=TreeSA());
+```
+"""
+function optimize_contraction_nested_einsum(
+  tensors::ITensorList;
+  optimizer::OMEinsumContractionOrders.CodeOptimizer=OMEinsumContractionOrders.TreeSA(),
+)
+  r, size_dict, index_dict = rawcode(tensors)
+  # `MergeVectors` can speed up contraction order finding;
+  # optimizing the permutation of tensors is set to `true`
+  res = OMEinsumContractionOrders.optimize_code(
+    r, size_dict, optimizer, OMEinsumContractionOrders.MergeVectors(), true
+  )
+  if res isa OMEinsumContractionOrders.SlicedEinsum # slicing is not supported!
+    if length(res.slicing) != 0
+      @warn "Slicing is not yet supported by `ITensors`, removing slices..."
+    end
+    res = res.eins
+  end
+  return res
+end
+
+"""
+Convert a `NestedEinsum` to a contraction sequence, such as `[[1, 2], [3, 4]]`.
+"""
+function convert_to_contraction_sequence(net::OMEinsumContractionOrders.NestedEinsum)
+  if OMEinsumContractionOrders.isleaf(net)
+    return net.tensorindex
+  else
+    return convert_to_contraction_sequence.(net.args)
+  end
+end
+
+"""
+Optimize the contraction order of `tensors` and return the result as a contraction sequence.
+"""
+function optimize_contraction_sequence(
+  tensors::ITensorList;
+  optimizer::OMEinsumContractionOrders.CodeOptimizer=OMEinsumContractionOrders.TreeSA(),
+)
+  res = optimize_contraction_nested_einsum(tensors; optimizer)
+  return convert_to_contraction_sequence(res)
+end
+
+"""
+    GreedyMethod(; method=MinSpaceOut(), nrepeat=10)
+
+The fast but lower-quality greedy optimizer. Input arguments are:
+
+* `method` is `MinSpaceDiff()` or `MinSpaceOut()`.
+  * `MinSpaceOut()` chooses the contraction that produces the minimum output tensor size,
+  * `MinSpaceDiff()` chooses the contraction that decreases the total space the most.
+* `nrepeat` is the number of repetitions; the best contraction order found over all repetitions is returned.
+"""
+function ITensorNetworks.contraction_sequence(
+  ::Algorithm"greedy", tn::Vector{ITensor}; kwargs...
+)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.GreedyMethod(; kwargs...)
+  )
+end
+
+"""
+    TreeSA(; sc_target=20, βs=collect(0.01:0.05:15), ntrials=10, niters=50,
+           sc_weight=1.0, rw_weight=0.2, initializer=:greedy, greedy_config=GreedyMethod(; nrepeat=1))
+
+Optimize the einsum contraction pattern using simulated annealing on the tensor expression tree.
+
+* `sc_target` is the target space complexity,
+* `ntrials`, `βs` and `niters` are annealing parameters: `ntrials` independent annealing runs are performed, each over the inverse temperatures specified by `βs`, with `niters` updates of the tree at each temperature.
+* `sc_weight` is the relative importance factor of space complexity in the loss compared with the time complexity.
+* `rw_weight` is the relative importance factor of memory read and write in the loss compared with the time complexity.
+* `initializer` specifies how to determine the initial configuration; it can be `:greedy` or `:random`. If the `:greedy` initializer is used, the two extra arguments `greedy_method` and `greedy_nrepeat` are also used.
+* `nslices` is the number of sliced legs, default is 0.
+* `fixed_slices` is a vector of sliced legs, default is `[]`.
+
+### References
+* [Recursive Multi-Tensor Contraction for XEB Verification of Quantum Circuits](https://arxiv.org/abs/2108.05665)
+"""
+function ITensorNetworks.contraction_sequence(::Algorithm"tree_sa", tn; kwargs...)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.TreeSA(; kwargs...)
+  )
+end
+
+"""
+    SABipartite(; sc_target=25, ntrials=50, βs=0.1:0.2:15.0, niters=1000,
+                max_group_size=40, greedy_config=GreedyMethod(), initializer=:random)
+
+Optimize the einsum code contraction order using the simulated annealing bipartition + greedy approach.
+This program first recursively cuts the tensors into several groups using simulated annealing,
+with the maximum group size specified by `max_group_size` and the maximum space complexity specified by `sc_target`,
+then finds the contraction order inside each group with the greedy search algorithm. Other arguments are:
+
+* `size_dict`, a dictionary that specifies leg dimensions,
+* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`,
+* `max_group_size` is the maximum group size allowed for the greedy search,
+* `βs` is a list of inverse temperatures `1/T`,
+* `niters` is the number of iterations at each temperature,
+* `ntrials` is the number of repetitions (with different random seeds),
+* `greedy_config` configures the greedy method,
+* `initializer`, the partition configuration initializer; one can choose `:random` or `:greedy` (slow but better).
+
+### References
+* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935)
+"""
+function ITensorNetworks.contraction_sequence(::Algorithm"sa_bipartite", tn; kwargs...)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.SABipartite(; kwargs...)
+  )
+end
+
+"""
+    KaHyParBipartite(; sc_target, imbalances=collect(0.0:0.005:0.8),
+                     max_group_size=40, greedy_config=GreedyMethod())
+
+Optimize the einsum code contraction order using the KaHyPar + greedy approach.
+This program first recursively cuts the tensors into several groups using KaHyPar,
+with the maximum group size specified by `max_group_size` and the maximum space complexity specified by `sc_target`,
+then finds the contraction order inside each group with the greedy search algorithm. Other arguments are:
+
+* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`,
+* `imbalances` is a KaHyPar parameter that controls the group sizes in hierarchical bipartition,
+* `max_group_size` is the maximum group size allowed for the greedy search,
+* `greedy_config` is a greedy optimizer.
+
+### References
+* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935)
+* [Simulating the Sycamore quantum supremacy circuits](https://arxiv.org/abs/2103.03074)
+"""
+function ITensorNetworks.contraction_sequence(::Algorithm"kahypar_bipartite", tn; kwargs...)
+  return optimize_contraction_sequence(
+    tn; optimizer=OMEinsumContractionOrders.KaHyParBipartite(; kwargs...)
+  )
+end
+end
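All four backends forward their keyword arguments directly to the corresponding `OMEinsumContractionOrders` optimizer constructor, and they only become available once the user loads `OMEinsumContractionOrders`. A short usage sketch (the three-tensor chain and the parameter values are illustrative):

```julia
using ITensors: Index, contract, randomITensor
using OMEinsumContractionOrders  # loading this activates the extension above
using ITensorNetworks: contraction_sequence

i, j, k, l = Index(4), Index(5), Index(6), Index(7)
tn = [randomITensor(i, j), randomITensor(j, k), randomITensor(k, l)]

# Keyword arguments are passed through to the optimizer constructors verbatim.
seq = contraction_sequence(tn; alg="greedy", nrepeat=5)
seq = contraction_sequence(tn; alg="tree_sa", sc_target=28, ntrials=2, niters=5)
contract(tn; sequence=seq)
```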
diff --git a/ext/ITensorNetworksObserversExt/ITensorNetworksObserversExt.jl b/ext/ITensorNetworksObserversExt/ITensorNetworksObserversExt.jl
new file mode 100644
index 00000000..e5565d21
--- /dev/null
+++ b/ext/ITensorNetworksObserversExt/ITensorNetworksObserversExt.jl
@@ -0,0 +1,9 @@
+module ITensorNetworksObserversExt
+using ITensorNetworks: ITensorNetworks
+using Observers.DataFrames: AbstractDataFrame
+using Observers: Observers
+
+function ITensorNetworks.update_observer!(observer::AbstractDataFrame; kwargs...)
+  return Observers.update!(observer; kwargs...)
+end
+end
diff --git a/src/ITensorNetworks.jl b/src/ITensorNetworks.jl
index 6184cd77..2eca4ad4 100644
--- a/src/ITensorNetworks.jl
+++ b/src/ITensorNetworks.jl
@@ -1,7 +1,6 @@ module ITensorNetworks
 include("lib/BaseExtensions/src/BaseExtensions.jl")
 include("lib/ITensorsExtensions/src/ITensorsExtensions.jl")
-include("observers.jl")
 include("visualize.jl")
 include("graphs.jl")
 include("abstractindsnetwork.jl")
@@ -33,6 +32,7 @@ include("caches/beliefpropagationcache.jl")
 include("contraction_tree_to_graph.jl")
 include("gauging.jl")
 include("utils.jl")
+include("update_observer.jl")
 include("solvers/local_solvers/eigsolve.jl")
 include("solvers/local_solvers/exponentiate.jl")
 include("solvers/local_solvers/dmrg_x.jl")
@@ -66,11 +66,7 @@ include("lib/ModelHamiltonians/src/ModelHamiltonians.jl")
 include("lib/ModelNetworks/src/ModelNetworks.jl")
 
 using PackageExtensionCompat: @require_extensions
-using Requires: @require
 function __init__()
   @require_extensions
-  @require OMEinsumContractionOrders = "6f22d1fd-8eed-4bb7-9776-e7d684900715" include(
-    "requires/omeinsumcontractionorders.jl"
-  )
 end
 end
diff --git a/src/apply.jl b/src/apply.jl
index 71c5637d..8548acc8 100644
--- a/src/apply.jl
+++ b/src/apply.jl
@@ -27,7 +27,6 @@ using ITensors.ITensorMPS: siteinds
 using KrylovKit: linsolve
 using LinearAlgebra: eigen, norm, svd
 using NamedGraphs: NamedEdge, has_edge
-using Observers: Observers
 
 function full_update_bp(
   o,
diff --git a/src/contract_approx/mincut.jl b/src/contract_approx/mincut.jl
index 995a5e14..6c772894 100644
--- a/src/contract_approx/mincut.jl
+++ b/src/contract_approx/mincut.jl
@@ -1,8 +1,8 @@
 using AbstractTrees: Leaves, PostOrderDFS
 using Combinatorics: powerset
 using Graphs: dijkstra_shortest_paths, weights
-using GraphsFlows: GraphsFlows
 using NamedGraphs: NamedDiGraph
+using NDTensors.AlgorithmSelection: Algorithm
 
 # a large number to prevent this edge being a cut
 MAX_WEIGHT = 1e32
@@ -37,6 +37,18 @@ function binary_tree_structure(tn::ITensorNetwork, outinds::Vector)
   return _binary_tree_structure(tn, outinds; maximally_unbalanced=false)
 end
 
+function mincut(graph::AbstractGraph, source_vertex, target_vertex; backend, kwargs...)
+  # TODO: Replace with `Backend(backend)`.
+  return mincut(Algorithm(backend), graph, source_vertex, target_vertex; kwargs...)
+end
+
+# TODO: Replace with `backend::Backend`.
+function mincut(
+  backend::Algorithm, graph::AbstractGraph, source_vertex, target_vertex; kwargs...
+) + return error("Backend `$backend` not implemented for `mincut`.") +end + """ Calculate the mincut between two subsets of the uncontracted inds (source_inds and terminal_inds) of the input tn. @@ -52,7 +64,7 @@ function _mincut(tn::ITensorNetwork, source_inds::Vector, terminal_inds::Vector) tn = disjoint_union( ITensorNetwork([ITensor(source_inds...), ITensor(terminal_inds...)]), tn ) - return GraphsFlows.mincut(tn, (1, 1), (2, 1), weights(tn)) + return mincut(tn, (1, 1), (2, 1); backend="GraphsFlows", capacity_matrix=weights(tn)) end """ diff --git a/src/contraction_sequences.jl b/src/contraction_sequences.jl index 3496340d..f97459f7 100644 --- a/src/contraction_sequences.jl +++ b/src/contraction_sequences.jl @@ -20,111 +20,3 @@ end function contraction_sequence(::Algorithm"optimal", tn::Vector{ITensor}) return optimal_contraction_sequence(tn) end - -function contraction_sequence_requires_error(module_name, algorithm) - return "Module `$(module_name)` not found, please type `using $(module_name)` before using the \"$(algorithm)\" contraction sequence backend!" -end - -""" - GreedyMethod(; method=MinSpaceOut(), nrepeat=10) - -The fast but poor greedy optimizer. Input arguments are: - -* `method` is `MinSpaceDiff()` or `MinSpaceOut`. - * `MinSpaceOut` choose one of the contraction that produces a minimum output tensor size, - * `MinSpaceDiff` choose one of the contraction that decrease the space most. -* `nrepeat` is the number of repeatition, returns the best contraction order. -""" -function contraction_sequence(::Algorithm"greedy", tn::Vector{ITensor}; kwargs...) - if !isdefined(@__MODULE__, :OMEinsumContractionOrders) - error(contraction_sequence_requires_error("OMEinsumContractionOrders", "greedy")) - end - return optimize_contraction_sequence( - tn; optimizer=OMEinsumContractionOrders.GreedyMethod(; kwargs...) - ) -end - -""" - TreeSA(; sc_target=20, βs=collect(0.01:0.05:15), ntrials=10, niters=50, - sc_weight=1.0, rw_weight=0.2, initializer=:greedy, greedy_config=GreedyMethod(; nrepeat=1)) - -Optimize the einsum contraction pattern using the simulated annealing on tensor expression tree. - -* `sc_target` is the target space complexity, -* `ntrials`, `βs` and `niters` are annealing parameters, doing `ntrials` indepedent annealings, each has inverse tempteratures specified by `βs`, in each temperature, do `niters` updates of the tree. -* `sc_weight` is the relative importance factor of space complexity in the loss compared with the time complexity. -* `rw_weight` is the relative importance factor of memory read and write in the loss compared with the time complexity. -* `initializer` specifies how to determine the initial configuration, it can be `:greedy` or `:random`. If it is using `:greedy` method to generate the initial configuration, it also uses two extra arguments `greedy_method` and `greedy_nrepeat`. -* `nslices` is the number of sliced legs, default is 0. -* `fixed_slices` is a vector of sliced legs, default is `[]`. - -### References -* [Recursive Multi-Tensor Contraction for XEB Verification of Quantum Circuits](https://arxiv.org/abs/2108.05665) -""" -function contraction_sequence(::Algorithm"tree_sa", tn; kwargs...) - if !isdefined(@__MODULE__, :OMEinsumContractionOrders) - error(contraction_sequence_requires_error("OMEinsumContractionOrders", "tree_sa")) - end - return optimize_contraction_sequence( - tn; optimizer=OMEinsumContractionOrders.TreeSA(; kwargs...) 
- ) -end - -""" - SABipartite(; sc_target=25, ntrials=50, βs=0.1:0.2:15.0, niters=1000 - max_group_size=40, greedy_config=GreedyMethod(), initializer=:random) - -Optimize the einsum code contraction order using the Simulated Annealing bipartition + Greedy approach. -This program first recursively cuts the tensors into several groups using simulated annealing, -with maximum group size specifed by `max_group_size` and maximum space complexity specified by `sc_target`, -Then finds the contraction order inside each group with the greedy search algorithm. Other arguments are: - -* `size_dict`, a dictionary that specifies leg dimensions, -* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`, -* `max_group_size` is the maximum size that allowed to used greedy search, -* `βs` is a list of inverse temperature `1/T`, -* `niters` is the number of iteration in each temperature, -* `ntrials` is the number of repetition (with different random seeds), -* `greedy_config` configures the greedy method, -* `initializer`, the partition configuration initializer, one can choose `:random` or `:greedy` (slow but better). - -### References -* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935) -""" -function contraction_sequence(::Algorithm"sa_bipartite", tn; kwargs...) - if !isdefined(@__MODULE__, :OMEinsumContractionOrders) - error(contraction_sequence_requires_error("OMEinsumContractionOrders", "sa_bipartite")) - end - return optimize_contraction_sequence( - tn; optimizer=OMEinsumContractionOrders.SABipartite(; kwargs...) - ) -end - -""" - KaHyParBipartite(; sc_target, imbalances=collect(0.0:0.005:0.8), - max_group_size=40, greedy_config=GreedyMethod()) - -Optimize the einsum code contraction order using the KaHyPar + Greedy approach. -This program first recursively cuts the tensors into several groups using KaHyPar, -with maximum group size specifed by `max_group_size` and maximum space complexity specified by `sc_target`, -Then finds the contraction order inside each group with the greedy search algorithm. Other arguments are: - -* `sc_target` is the target space complexity, defined as `log2(number of elements in the largest tensor)`, -* `imbalances` is a KaHyPar parameter that controls the group sizes in hierarchical bipartition, -* `max_group_size` is the maximum size that allowed to used greedy search, -* `greedy_config` is a greedy optimizer. - -### References -* [Hyper-optimized tensor network contraction](https://arxiv.org/abs/2002.01935) -* [Simulating the Sycamore quantum supremacy circuits](https://arxiv.org/abs/2103.03074) -""" -function contraction_sequence(::Algorithm"kahypar_bipartite", tn; kwargs...) - if !isdefined(@__MODULE__, :OMEinsumContractionOrders) - error( - contraction_sequence_requires_error("OMEinsumContractionOrders", "kahypar_bipartite") - ) - end - return optimize_contraction_sequence( - tn; optimizer=OMEinsumContractionOrders.KaHyParBipartite(; kwargs...) - ) -end diff --git a/src/lib/ITensorsExtensions/src/itensor.jl b/src/lib/ITensorsExtensions/src/itensor.jl index 94fc32b9..8c834830 100644 --- a/src/lib/ITensorsExtensions/src/itensor.jl +++ b/src/lib/ITensorsExtensions/src/itensor.jl @@ -32,7 +32,6 @@ using ITensors.NDTensors: DiagBlockSparseTensor, DenseTensor, BlockOffsets -using Observers: update!, insert_function! 
function NDTensors.blockoffsets(dense::DenseTensor) return BlockOffsets{ndims(dense)}([Block(ntuple(Returns(1), ndims(dense)))], [0]) diff --git a/src/observers.jl b/src/observers.jl deleted file mode 100644 index 4b668a71..00000000 --- a/src/observers.jl +++ /dev/null @@ -1,7 +0,0 @@ -# TODO: Move to `ITensorNetworksObserversExt`. -using Observers: Observers - -""" -Overload of `Observers.update!`. -""" -Observers.update!(::Nothing; kwargs...) = nothing diff --git a/src/requires/omeinsumcontractionorders.jl b/src/requires/omeinsumcontractionorders.jl deleted file mode 100644 index 032e76c4..00000000 --- a/src/requires/omeinsumcontractionorders.jl +++ /dev/null @@ -1,91 +0,0 @@ -using Dictionaries: index - -# OMEinsumContractionOrders wrapper for ITensors -# Slicing is not supported, because it might require extra work to slice an `ITensor` correctly. - -const ITensorList = Union{Vector{ITensor},Tuple{Vararg{ITensor}}} - -# TODO: Replace with `inds(A::ITensor)` or `collect(inds(A::ITensor))` -getid(index::Index) = index -getids(A::ITensor) = Index{Int}[getid(x) for x in ITensors.inds(A)] - -# infer the output tensor labels -# TODO: Use `symdiff` instead. -function infer_output(inputs::AbstractVector{<:AbstractVector{Index{IT}}}) where {IT} - indslist = vcat(inputs...) - # get output indices - iy = Index{IT}[] - for l in indslist - c = count(==(l), indslist) - if c == 1 - push!(iy, l) - elseif c !== 2 - error("Each index in a tensor network must appear at most twice!") - end - end - return iy -end - -# get a (labels, size_dict) representation of a collection of ITensors -function rawcode(tensors::ITensorList) - # we use id as the label - indsAs = [collect(Index{Int}, ITensors.inds(A)) for A in tensors] - ixs = [getids(x) for x in tensors] - unique_labels = unique(vcat(indsAs...)) - size_dict = Dict([getid(x) => x.space for x in unique_labels]) - index_dict = Dict([getid(x) => x for x in unique_labels]) - return OMEinsumContractionOrders.EinCode(ixs, getid.(infer_output(indsAs))), - size_dict, - index_dict -end - -""" -$(TYPEDSIGNATURES) -Optimize the contraction order of a tensor network specified as a vector tensors. -Returns a [`NestedEinsum`](@ref) instance. -### Examples -```jldoctest -julia> using ITensors, ITensorContractionOrders -julia> i, j, k, l = Index(4), Index(5), Index(6), Index(7); -julia> x, y, z = randomITensor(i, j), randomITensor(j, k), randomITensor(k, l); -julia> net = optimize_contraction([x, y, z]; optimizer=TreeSA()); -``` -""" -function optimize_contraction_nested_einsum( - tensors::ITensorList; optimizer::OMEinsumContractionOrders.CodeOptimizer=TreeSA() -) - r, size_dict, index_dict = rawcode(tensors) - # merge vectors can speed up contraction order finding - # optimize the permutation of tensors is set to true - res = OMEinsumContractionOrders.optimize_code( - r, size_dict, optimizer, OMEinsumContractionOrders.MergeVectors(), true - ) - if res isa OMEinsumContractionOrders.SlicedEinsum # slicing is not supported! - if length(res.slicing) != 0 - @warn "Slicing is not yet supported by `ITensors`, removing slices..." - end - res = res.eins - end - return res -end - -""" -Convert NestedEinsum to contraction sequence, such as `[[1, 2], [3, 4]]`. -""" -function convert_to_contraction_sequence(net::OMEinsumContractionOrders.NestedEinsum) - if OMEinsumContractionOrders.isleaf(net) - return net.tensorindex - else - return convert_to_contraction_sequence.(net.args) - end -end - -""" -Convert the result of `optimize_contraction` to a contraction sequence. 
-""" -function optimize_contraction_sequence( - tensors::ITensorList; optimizer::OMEinsumContractionOrders.CodeOptimizer=TreeSA() -) - res = optimize_contraction_nested_einsum(tensors; optimizer) - return convert_to_contraction_sequence(res) -end diff --git a/src/requires/omeinsumcontractionorders_itensorcontractiontree.jl b/src/requires/omeinsumcontractionorders_itensorcontractiontree.jl deleted file mode 100644 index e9a61547..00000000 --- a/src/requires/omeinsumcontractionorders_itensorcontractiontree.jl +++ /dev/null @@ -1,154 +0,0 @@ -""" -$(TYPEDEF) - ITensorContractionTree(args) -> ITensorContractionTree -Define a tensor network with its contraction order specified by a tree structure. -In this network, each index in this tensor network must appear either twice or once. -The input `args` is a Vector of [`ITensor`](@ref) or another layer of Vector. -This data type can be automatically generated from [`optimize_contraction`](@ref) function. -### Fields -$(TYPEDFIELDS) -### Examples -The following code creates a tensor network and evaluates it in a sequencial order. -```jldoctest -julia> using ITensors, ITensorContractionOrders -julia> i, j, k, l = Index(4), Index(5), Index(6), Index(7); -julia> x, y, z = randomITensor(i, j), randomITensor(j, k), randomITensor(k, l); -julia> it = ITensorContractionTree([[x, y] ,z]); -julia> itensor_list = ITensorContractionOrders.flatten(it); # convert this tensor network to a Vector of ITensors -julia> evaluate(it) ≈ foldl(*, itensor_list) -true -``` -""" -struct ITensorContractionTree{IT} - args::Vector{Union{ITensorContractionTree,ITensor}} - iy::Vector{Index{IT}} # the output labels, note: this is type unstable -end -ITensors.inds(it::ITensorContractionTree) = (it.iy...,) - -function ITensorContractionTree(args)::ITensorContractionTree - args = Union{ITensorContractionTree,ITensor}[ - arg isa Union{AbstractVector,Tuple} ? ITensorContractionTree(arg) : arg for arg in args - ] - # get output labels - # NOTE: here we assume the output index id has `Int` type - labels = collect.(Index{Int}, ITensors.inds.(args)) - return ITensorContractionTree(args, infer_output(labels)) -end - -""" - flatten(it::ITensorContractionTree) -> Vector -Convert an [`ITensorContractionTree`](@ref) to a Vector of [`ITensor`](@ref). -""" -flatten(it::ITensorContractionTree) = flatten!(it, ITensor[]) -function flatten!(it::ITensorContractionTree, lst) - for arg in it.args - if arg isa ITensor - push!(lst, arg) - else - flatten!(arg, lst) - end - end - return lst -end - -# Contract and evaluate an itensor network. -""" -$(TYPEDSIGNATURES) -""" -evaluate(it::ITensorContractionTree)::ITensor = foldl(*, evaluate.(it.args)) -evaluate(it::ITensor) = it - -getids(A::ITensorContractionTree) = collect(Index{Int}, getid.(ITensors.inds(A))) -function rootcode(it::ITensorContractionTree) - ixs = [getids(A) for A in it.args] - return OMEinsumContractionOrders.EinCode(ixs, it.iy) -end - -# decorate means converting the raw contraction pattern to ITensorContractionTree. -# `tensors` is the original input tensor list. 
-function decorate(net::OMEinsumContractionOrders.NestedEinsum, tensors::ITensorList) - if OMEinsumContractionOrders.isleaf(net) - return tensors[net.tensorindex] - else - return ITensorContractionTree(decorate.(net.args, Ref(tensors))) - end -end - -function update_size_index_dict!( - size_dict::Dict{Index{IT}}, index_dict::Dict{Index{IT}}, tensor::ITensor -) where {IT} - for ind in ITensors.inds(tensor) - size_dict[getid(ind)] = ind.space - index_dict[getid(ind)] = ind - end - return size_dict -end - -function rawcode!( - net::ITensorContractionTree{IT}, - size_dict::Dict{Index{IT}}, - index_dict::Dict{Index{IT}}, - index_counter=Base.RefValue(0), -) where {IT} - args = map(net.args) do s - if s isa ITensor - update_size_index_dict!(size_dict, index_dict, s) - OMEinsumContractionOrders.NestedEinsum{Index{IT}}(index_counter[] += 1) - else # ITensorContractionTree - scode = rawcode!(s, size_dict, index_dict, index_counter) - # no need to update size, size is only updated on the leaves. - scode - end - end - return OMEinsumContractionOrders.NestedEinsum(args, rootcode(net)) -end -function rawcode(net::ITensorContractionTree{IT}) where {IT} - size_dict = Dict{Index{IT},Int}() - index_dict = Dict{Index{IT},Index{Int}}() - r = rawcode!(net, size_dict, index_dict) - return r, size_dict, index_dict -end - -""" -$(TYPEDSIGNATURES) -""" -OMEinsumContractionOrders.peak_memory(net::ITensorContractionTree)::Int = - peak_memory(rawcode(net)[1:2]...) - -""" -$(TYPEDSIGNATURES) -""" -OMEinsumContractionOrders.flop(net::ITensorContractionTree)::Int = - flop(rawcode(net)[1:2]...) - -""" -$(TYPEDSIGNATURES) -""" -function OMEinsumContractionOrders.timespacereadwrite_complexity( - net::ITensorContractionTree -) - return OMEinsumContractionOrders.timespacereadwrite_complexity(rawcode(net)[1:2]...) -end - -""" -$(TYPEDSIGNATURES) -""" -function OMEinsumContractionOrders.timespace_complexity(net::ITensorContractionTree) - return OMEinsumContractionOrders.timespacereadwrite_complexity(rawcode(net)[1:2]...)[1:2] -end - -""" -$(TYPEDSIGNATURES) -""" -function OMEinsumContractionOrders.contraction_complexity(net::ITensorContractionTree) - return OMEinsumContractionOrders.contraction_complexity(rawcode(net)[1:2]...) 
-end - -""" -$(TYPEDSIGNATURES) - label_elimination_order(net::ITensorContractionTree) -> Vector -""" -function OMEinsumContractionOrders.label_elimination_order(net::ITensorContractionTree) - r, size_dict, index_dict = rawcode(net) - return getindex.(Ref(index_dict), label_elimination_order(r)) -end diff --git a/src/solvers/alternating_update/alternating_update.jl b/src/solvers/alternating_update/alternating_update.jl index 3b0f6b77..160e68f9 100644 --- a/src/solvers/alternating_update/alternating_update.jl +++ b/src/solvers/alternating_update/alternating_update.jl @@ -1,7 +1,6 @@ using ITensors: state using ITensors.ITensorMPS: linkind using NamedGraphs.GraphsExtensions: GraphsExtensions -using Observers: Observers function alternating_update( operator, @@ -66,7 +65,6 @@ function alternating_update( @assert !isnothing(sweep_plans) for which_sweep in eachindex(sweep_plans) sweep_plan = sweep_plans[which_sweep] - sweep_time = @elapsed begin for which_region_update in eachindex(sweep_plan) state, projected_operator = region_update( @@ -81,8 +79,7 @@ function alternating_update( ) end end - - Observers.update!( + update_observer!( sweep_observer!; state, which_sweep, sweep_time, outputlevel, sweep_plans ) !isnothing(sweep_printer) && diff --git a/src/solvers/alternating_update/region_update.jl b/src/solvers/alternating_update/region_update.jl index 7fae5d34..b92adc8c 100644 --- a/src/solvers/alternating_update/region_update.jl +++ b/src/solvers/alternating_update/region_update.jl @@ -1,6 +1,3 @@ -using ITensors.NDTensors: mindim -using Observers: Observers - #ToDo: generalize beyond 2-site #ToDo: remove concept of orthogonality center for generality function current_ortho(sweep_plan, which_region_update) @@ -93,10 +90,6 @@ function region_update( ) state = state![] projected_operator = projected_operator![] - if !(phi isa ITensor && info isa NamedTuple) - println("Solver returned the following types: $(typeof(phi)), $(typeof(info))") - error("In alternating_update, solver must return an ITensor and a NamedTuple") - end # ToDo: implement noise term as updater #drho = nothing #ortho = "left" #i guess with respect to ordered vertices that's valid but may be cleaner to use next_region logic @@ -107,11 +100,7 @@ function region_update( state, spec = inserter( state, phi, region, ortho_vertex; inserter_kwargs..., internal_kwargs ) - all_kwargs = (; - cutoff, - maxdim, - mindim, which_region_update, sweep_plan, total_sweep_steps=length(sweep_plan), @@ -125,8 +114,7 @@ function region_update( region_kwargs..., internal_kwargs..., ) - Observers.update!(region_observer!; all_kwargs...) + update_observer!(region_observer!; all_kwargs...) !(isnothing(region_printer)) && region_printer(; all_kwargs...) - return state, projected_operator end diff --git a/src/update_observer.jl b/src/update_observer.jl new file mode 100644 index 00000000..3b1b6ff9 --- /dev/null +++ b/src/update_observer.jl @@ -0,0 +1,11 @@ +function update_observer!(observer; kwargs...) + return error("Not implemented") +end + +# Default fallback if no observer is specified. +# Makes the source code a bit simpler, though +# maybe it is a bit too "tricky" and should be +# removed. +function update_observer!(observer::Nothing; kwargs...) 
+ return nothing +end diff --git a/test/Project.toml b/test/Project.toml index 9e02c3ef..a5b76d30 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -12,7 +12,6 @@ ITensorGaussianMPS = "2be41995-7c9f-4653-b682-bfa4e7cebb93" ITensorNetworks = "2919e153-833c-4bdc-8836-1ea460a35fc7" ITensorUnicodePlots = "73163f41-4a9e-479f-8353-73bf94dbd758" ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" -KaHyPar = "2a6221f6-aa48-11e9-3542-2d9e0ef01880" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Metis = "2679e427-3c69-5b7f-982b-ece356f1e94b" diff --git a/test/test_belief_propagation.jl b/test/test_belief_propagation.jl index 275d5b91..cf0cac0c 100644 --- a/test/test_belief_propagation.jl +++ b/test/test_belief_propagation.jl @@ -1,6 +1,8 @@ @eval module $(gensym()) using Compat: Compat using Graphs: vertices +# Trigger package extension. +using GraphsFlows: GraphsFlows using ITensorNetworks: ITensorNetworks, BeliefPropagationCache, diff --git a/test/test_binary_tree_partition.jl b/test/test_binary_tree_partition.jl index 499c7547..c5fc6a85 100644 --- a/test/test_binary_tree_partition.jl +++ b/test/test_binary_tree_partition.jl @@ -1,6 +1,8 @@ @eval module $(gensym()) using DataGraphs: DataGraph, underlying_graph, vertex_data using Graphs: add_vertex!, vertices +# Trigger package extension. +using GraphsFlows: GraphsFlows using ITensors: Index, ITensor, contract, noncommoninds, randomITensor using ITensors.ITensorMPS: MPS using ITensorNetworks: @@ -103,7 +105,7 @@ end for alg in ["density_matrix", "ttn_svd"] approx_tn, lognorm = contract( tn; - alg=alg, + alg, output_structure=structure, contraction_sequence_kwargs=(; alg="sa_bipartite"), ) diff --git a/test/test_contract_deltas.jl b/test/test_contract_deltas.jl index 24ef9bc4..fa6ade3b 100644 --- a/test/test_contract_deltas.jl +++ b/test/test_contract_deltas.jl @@ -1,5 +1,7 @@ @eval module $(gensym()) using Graphs: dfs_tree, nv, vertices +# Trigger package extension. +using GraphsFlows: GraphsFlows using ITensors: Index, ITensor, delta, noncommoninds, randomITensor using ITensorNetworks: IndsNetwork, diff --git a/test/test_contraction_sequence.jl b/test/test_contraction_sequence.jl index e0fab70c..40ea6fc0 100644 --- a/test/test_contraction_sequence.jl +++ b/test/test_contraction_sequence.jl @@ -41,11 +41,12 @@ Random.seed!(1234) # KaHyPar doesn't work on Windows # https://github.com/kahypar/KaHyPar.jl/issues/9 using Pkg - Pkg.add("KaHyPar") + Pkg.add("KaHyPar"; io=devnull) using KaHyPar seq_kahypar_bipartite = contraction_sequence( tn; alg="kahypar_bipartite", sc_target=200 ) + Pkg.rm("KaHyPar"; io=devnull) res_kahypar_bipartite = contract(tn; sequence=seq_kahypar_bipartite)[] @test res_optimal ≈ res_kahypar_bipartite seq_einexprs_kahypar = contraction_sequence(tn; alg="einexpr", optimizer=HyPar())
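The new `update_observer!` hook replaces the direct `Observers.update!` calls in the solvers: `nothing` observers are no-ops, `AbstractDataFrame` observers are handled by the `ITensorNetworksObserversExt` extension, and any custom observer can opt in by overloading the function. A minimal sketch with a hypothetical observer type (`UpdateCounter` is not part of this PR):

```julia
using ITensorNetworks: ITensorNetworks

# Hypothetical observer that counts how many times it gets updated.
mutable struct UpdateCounter
  n::Int
end
UpdateCounter() = UpdateCounter(0)

function ITensorNetworks.update_observer!(observer::UpdateCounter; kwargs...)
  # The sweeping routines forward their current state (sweep number,
  # region, etc.) as keyword arguments; this observer ignores them.
  observer.n += 1
  return observer
end
```

An observer like this would be passed as the `sweep_observer!` or `region_observer!` keyword argument of the sweeping routines shown above.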