diff --git a/Project.toml b/Project.toml index 97832e9f..253b3627 100644 --- a/Project.toml +++ b/Project.toml @@ -1,6 +1,6 @@ name = "ITensorNetworks" uuid = "2919e153-833c-4bdc-8836-1ea460a35fc7" -version = "0.15.20" +version = "0.15.21" authors = ["Matthew Fishman , Joseph Tindall and contributors"] [workspace] diff --git a/docs/Project.toml b/docs/Project.toml index eff97ff6..579db99f 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -1,5 +1,6 @@ [deps] Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" ITensorNetworks = "2919e153-833c-4bdc-8836-1ea460a35fc7" Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" @@ -7,6 +8,6 @@ Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306" path = ".." [compat] -Documenter = "1.10" +Documenter = "1" ITensorNetworks = "0.15" Literate = "2.20.1" diff --git a/docs/make.jl b/docs/make.jl index dfae5608..8750e23c 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,5 +1,8 @@ using Documenter: Documenter, DocMeta, deploydocs, makedocs +using Graphs: Graphs using ITensorNetworks: ITensorNetworks +using ITensors: ITensors +using LinearAlgebra: LinearAlgebra DocMeta.setdocmeta!( ITensorNetworks, :DocTestSetup, :(using ITensorNetworks); recursive = true @@ -16,7 +19,16 @@ makedocs(; edit_link = "main", assets = ["assets/favicon.ico", "assets/extras.css"] ), - pages = ["Home" => "index.md", "Reference" => "reference.md"], + pages = [ + "Home" => "index.md", + "Manual" => [ + "Tensor Networks" => "tensor_networks.md", + "Tree Tensor Networks" => "tree_tensor_networks.md", + "Computing Properties" => "computing_properties.md", + "Solvers" => "solvers.md", + ], + "API Reference" => "reference.md", + ], warnonly = true ) diff --git a/docs/src/computing_properties.md b/docs/src/computing_properties.md new file mode 100644 index 00000000..e329e7fc --- /dev/null +++ b/docs/src/computing_properties.md @@ -0,0 +1,104 @@ +# Computing Properties + +## Inner Products and Norms + +For general `ITensorNetwork` states, inner products are computed by constructing and +contracting the combined bra–ket network. The default algorithm is **belief propagation** +(`alg="bp"`), which is efficient for large and loopy networks. Use `alg="exact"` for +exact contraction (only practical for small networks or trees). 
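+
+The snippets below assume two states `psi` and `phi` (and, where it appears, an operator
+network `A`) defined on the same index network. A minimal sketch of such a setup, using
+the `random_tensornetwork` helper:
+
+```julia
+using ITensorNetworks: random_tensornetwork, siteinds
+using NamedGraphs.NamedGraphGenerators: named_grid
+
+g = named_grid((3, 3))
+s = siteinds("S=1/2", g)
+psi = random_tensornetwork(s; link_space = 2)
+phi = random_tensornetwork(s; link_space = 2)
+```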
+
+```julia
+z = inner(phi, psi) # ⟨ϕ|ψ⟩ (belief propagation by default)
+z = inner(phi, psi; alg="exact") # ⟨ϕ|ψ⟩ (exact contraction)
+z = inner(phi, A, psi) # ⟨ϕ|A|ψ⟩
+n = norm(psi) # √⟨ψ|ψ⟩
+```
+
+For large tensor networks, where the value of the inner product itself can overflow or
+underflow in floating-point arithmetic, use the logarithmic variant:
+
+```julia
+logz = loginner(phi, psi) # log(⟨ϕ|ψ⟩) (numerically stable)
+```
+
+For `TreeTensorNetwork`, specialized exact methods exploit the tree structure directly
+without belief propagation:
+
+```julia
+z = inner(x, y) # ⟨x|y⟩ via DFS contraction
+z = inner(y, A, x) # ⟨y|A|x⟩
+n = norm(psi) # uses ortho_region if available for efficiency
+```
+
+```@docs; canonical=false
+ITensors.inner(::ITensorNetworks.AbstractITensorNetwork, ::ITensorNetworks.AbstractITensorNetwork)
+ITensors.inner(::ITensorNetworks.AbstractITensorNetwork, ::ITensorNetworks.AbstractITensorNetwork, ::ITensorNetworks.AbstractITensorNetwork)
+ITensorNetworks.loginner
+ITensors.inner(::ITensorNetworks.AbstractTreeTensorNetwork, ::ITensorNetworks.AbstractTreeTensorNetwork)
+ITensors.inner(::ITensorNetworks.AbstractTreeTensorNetwork, ::ITensorNetworks.AbstractTreeTensorNetwork, ::ITensorNetworks.AbstractTreeTensorNetwork)
+```
+
+## Normalization
+
+`normalize` rescales all tensors in the network by the same factor so that `norm(ψ) ≈ 1`.
+For `TreeTensorNetwork`, the normalization is applied directly at the orthogonality center.
+
+```julia
+psi = normalize(psi) # exact (default)
+psi = normalize(psi; alg = "bp") # belief propagation (for large loopy networks)
+```
+
+```@docs; canonical=false
+LinearAlgebra.normalize(::ITensorNetworks.AbstractITensorNetwork)
+```
+
+## Expectation Values
+
+### General `ITensorNetwork`
+
+For arbitrary (possibly loopy) tensor networks, expectation values are computed via
+**belief propagation** by default. This is approximate for loopy networks but can be made
+exact with `alg="exact"` (at exponential cost).
+
+```julia
+# Expectation of "Sz" at every vertex
+sz = expect(psi, "Sz")
+
+# Selected vertices only
+sz = expect(psi, "Sz", [(1,), (3,), (5,)])
+
+# Exact contraction
+sz = expect(psi, "Sz"; alg = "exact")
+```
+
+A single operator at a given vertex can be computed with the `ITensors.Op` form. When
+computing several expectation values on the same state, a pre-built belief propagation
+cache can be passed via the `cache!` keyword argument to avoid redundant work (see the
+`expect` docstring below):
+
+```julia
+using ITensors: Op
+sz_v = expect(psi, Op("Sz", v)) # single operator at vertex v of psi
+```
+
+```@docs; canonical=false
+ITensorNetworks.expect(::ITensorNetworks.AbstractITensorNetwork, ::String)
+ITensorNetworks.expect(::ITensorNetworks.AbstractITensorNetwork, ::String, ::Any)
+ITensorNetworks.expect(::ITensorNetworks.AbstractITensorNetwork, ::ITensors.Ops.Op)
+```
+
+### `TreeTensorNetwork`
+
+For TTN/MPS states, a specialized exact method exploiting successive orthogonalizations is
+available. The operator name is passed as the **first** argument (note the different
+argument order from the general form above):
+
+```julia
+sz = expect("Sz", psi) # all sites
+sz = expect("Sz", psi; vertices = [1, 3, 5]) # selected sites
+```
+
+This is more efficient than the belief propagation approach for tree-structured networks
+because it reuses the orthogonal gauge.
+
+```@docs; canonical=false
+ITensorNetworks.expect(::String, ::ITensorNetworks.AbstractTreeTensorNetwork)
+```
diff --git a/docs/src/reference.md b/docs/src/reference.md
index 3dc55f96..fc5be6d1 100644
--- a/docs/src/reference.md
+++ b/docs/src/reference.md
@@ -1,4 +1,7 @@
-# Reference
+# API Reference
+
+Complete listing of all documented public functions and types in ITensorNetworks.jl,
+ITensorNetworks.ModelNetworks, and ITensorNetworks.ModelHamiltonians.
 
 ```@autodocs
 Modules = [ITensorNetworks, ITensorNetworks.ModelNetworks, ITensorNetworks.ModelHamiltonians]
diff --git a/docs/src/solvers.md b/docs/src/solvers.md
new file mode 100644
index 00000000..c5755ef0
--- /dev/null
+++ b/docs/src/solvers.md
@@ -0,0 +1,58 @@
+# Solvers
+
+ITensorNetworks.jl provides sweep-based solvers for variational problems on tree tensor
+networks. All solvers follow the same high-level pattern:
+
+1. Start from an initial `TreeTensorNetwork` guess.
+2. Sweep over the network, solving a small local problem at each site or pair of sites.
+3. After each local solve, truncate the updated bond to control bond dimension growth.
+4. Repeat for `nsweeps` sweeps.
+
+## Eigenvalue Problems — `eigsolve` / `dmrg`
+
+[`eigsolve`](@ref ITensorNetworks.eigsolve) finds the lowest eigenvalue and corresponding
+eigenvector of an operator (e.g. a Hamiltonian) using a DMRG-like
+variational sweep algorithm.
+[`dmrg`](@ref ITensorNetworks.dmrg) is an alias for `eigsolve`.
+
+```julia
+using NamedGraphs.NamedGraphGenerators: named_comb_tree
+using ITensors: OpSum
+using ITensorNetworks: dmrg, dst, edges, normalize, random_ttn, siteinds, src, ttn
+using TensorOperations
+
+let
+# Build a Heisenberg Hamiltonian on a comb tree
+g = named_comb_tree((4, 3))
+s = siteinds("S=1/2", g)
+h = OpSum()
+for e in edges(g)
+    h += 0.5, "S+", src(e), "S-", dst(e)
+    h += 0.5, "S-", src(e), "S+", dst(e)
+    h += "Sz", src(e), "Sz", dst(e)
+end
+H = ttn(h, s)
+
+# Random, normalized initial state
+psi0 = normalize(random_ttn(s; link_space = 4))
+
+# Run DMRG
+energy, psi = dmrg(H, psi0;
+    nsweeps = 10,
+    nsites = 2,
+    factorize_kwargs = (; cutoff = 1e-10, maxdim = 50),
+    outputlevel = 1,
+)
+end
+```
+
+```@docs
+ITensorNetworks.eigsolve
+ITensorNetworks.dmrg
+```
+
+## Time Evolution — `time_evolve`
+
+```@docs
+ITensorNetworks.time_evolve
+```
diff --git a/docs/src/tensor_networks.md b/docs/src/tensor_networks.md
new file mode 100644
index 00000000..12ae66db
--- /dev/null
+++ b/docs/src/tensor_networks.md
@@ -0,0 +1,104 @@
+# Tensor Networks
+
+## The `ITensorNetwork` Type
+
+An `ITensorNetwork` is the central data structure of this package. It represents a
+collection of [`ITensor`](https://itensor.github.io/ITensors.jl/stable/)s arranged on a
+graph, where each edge encodes a shared (contracted) index between the neighboring tensors.
+
+Key facts:
+
+- The underlying graph is a [`NamedGraph`](https://github.com/ITensor/NamedGraphs.jl), so
+  vertices can be any hashable Julia value: integers, tuples, strings, etc.
+- Each vertex holds exactly one `ITensor`.
+- Edges and link indices are either inferred from shared `Index` objects (when constructing
+  from a collection of `ITensor`s) or inserted automatically (when constructing from an
+  `IndsNetwork`).
+
+## Construction
+
+The most common entry point is an `IndsNetwork` — a graph whose vertices and edges carry
+`Index` objects. Generate site indices with the `siteinds` function, which takes a site
+type string (such as `"S=1/2"` or `"Electron"`) and a `NamedGraph`.
The NamedGraph can be +generated from functions such as `named_grid`, `named_comb_tree`, etc. from the NamedGraphs.jl +`NamedGraphGenerators` module: + +```julia +using ITensorNetworks, ITensors, NamedGraphs.NamedGraphGenerators + +# 3×3 square-lattice tensor network +g = named_grid((3, 3)) +s = siteinds("S=1/2", g) # one spin-½ Index per vertex + +# Zero-initialized, bond dimension 2 +ψ = ITensorNetwork(s; link_space = 2) + +# Product state — every site in the |↑⟩ state +ψ = ITensorNetwork("Up", s) + +# Staggered initialization with a vertex-dependent function +ψ = ITensorNetwork(v -> isodd(sum(v)) ? "Up" : "Dn", s) +``` + +When you already have `ITensor`s in hand, edges are inferred automatically from shared +indices: + +```julia +i, j, k = Index(2,"i"), Index(2,"j"), Index(2,"k") +A, B, C = ITensor(i,j), ITensor(j,k), ITensor(k) + +tn = ITensorNetwork([A, B, C]) # integer vertices 1, 2, 3 +tn = ITensorNetwork(["A","B","C"], [A, B, C]) # named vertices +tn = ITensorNetwork(["A"=>A, "B"=>B, "C"=>C]) # from pairs +``` + +```@docs; canonical=false +ITensorNetworks.ITensorNetwork +``` + +## Accessing Data + +```julia +ψ[(1,2)] # ITensor at vertex (1,2) +ψ[(1,2)] = T # replace tensor at a vertex +vertices(ψ) # all vertex labels +edges(ψ) # all edges +neighbors(ψ, v) # neighbouring vertices of v +nv(ψ), ne(ψ) # vertex / edge counts +siteinds(ψ) # IndsNetwork of site (physical) indices +linkinds(ψ) # IndsNetwork of bond (virtual) indices +``` + +## Adding Two `ITensorNetwork`s + +Two networks with the same graph and site indices can be added. The result represents the +quantum state `ψ₁ + ψ₂` and has bond dimension equal to the **sum** of the two input bond +dimensions. Individual bonds of the result can be recompressed with `truncate(tn, edge)`. +For `TreeTensorNetwork`, the no-argument form `truncate(ttn; kwargs...)` sweeps and +recompresses all bonds at once. + +```julia +ψ12 = add(ψ1, ψ2) +``` + +```@docs; canonical=false +ITensorNetworks.add(::ITensorNetworks.AbstractITensorNetwork, ::ITensorNetworks.AbstractITensorNetwork) +``` + +## Bond Truncation + +A single bond (edge) of any `ITensorNetwork` can be truncated by SVD: + +```julia +tn = truncate(tn, (1,2) => (1,3)) # truncate the bond between vertices (1,2) and (1,3) +tn = truncate(tn, edge) # or pass an AbstractEdge directly +``` + +Truncation parameters (`cutoff`, `maxdim`, `mindim`, …) are forwarded to `ITensors.svd`. +For a `TreeTensorNetwork`, the sweep-based `truncate(ttn; kwargs...)` is usually more +convenient because it recompresses the entire network at once with controlled errors; +see the [Tree Tensor Networks](@ref) page. + +```@docs; canonical=false +Base.truncate(::ITensorNetworks.AbstractITensorNetwork, ::Graphs.AbstractEdge) +``` diff --git a/docs/src/tree_tensor_networks.md b/docs/src/tree_tensor_networks.md new file mode 100644 index 00000000..f978664f --- /dev/null +++ b/docs/src/tree_tensor_networks.md @@ -0,0 +1,148 @@ +# Tree Tensor Networks + +## Overview + +A `TreeTensorNetwork` (alias `TTN`) is an `ITensorNetwork` whose underlying graph is a +**tree** (no cycles). This additional structure enables exact, efficient canonical gauges +via QR decomposition — a key ingredient in variational algorithms such as DMRG and TDVP. + +A `TreeTensorNetwork` carries an extra piece of metadata: the `ortho_region`, which +records which vertices currently form the orthogonality centre of the network. Algorithms +update this field as the gauge changes. 
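+
+For example, a minimal sketch of how this metadata tracks the gauge (using
+[`orthogonalize`](@ref) and [`ortho_region`](@ref), described below):
+
+```julia
+psi = orthogonalize(psi, v) # move the orthogonality centre to vertex v
+ortho_region(psi)           # the region now contains only v
+```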
+
+**MPS** (matrix product states) are the special case of a `TreeTensorNetwork` on a
+1D path graph. The [`mps`](@ref ITensorNetworks.mps) constructor enforces this topology
+and provides a convenient interface for 1D calculations.
+
+## Construction
+
+### From an `IndsNetwork` or graph
+
+```julia
+using NamedGraphs.NamedGraphGenerators: named_comb_tree
+using ITensorNetworks: mps, random_mps, random_ttn, siteinds, ttn
+
+let
+# Comb-tree TTN (a popular tree topology for 2D-like systems)
+g = named_comb_tree((4, 3))
+sites = siteinds("S=1/2", g)
+
+psi = ttn(sites) # zero-initialised
+psi = ttn(v -> "Up", sites) # product state
+
+# Random, normalised TTN
+psi = random_ttn(sites; link_space = 4)
+
+# 1D MPS
+s1d = siteinds("S=1/2", 10)
+mps_state = mps(v -> "Up", s1d) # product MPS
+mps_state = random_mps(s1d; link_space = 4)
+end
+```
+
+```@docs; canonical=false
+ITensorNetworks.ttn
+ITensorNetworks.mps
+ITensorNetworks.random_ttn
+ITensorNetworks.random_mps
+```
+
+### The `TreeTensorNetwork` type and conversion
+
+The `TreeTensorNetwork` struct wraps an `ITensorNetwork` and records the current
+orthogonality region. Use the `TreeTensorNetwork` constructor to convert a plain
+`ITensorNetwork` with tree topology into a `TTN`, and `ITensorNetwork` to strip the
+gauge metadata when you need a plain network again.
+
+```julia
+itn = ITensorNetwork(sites; link_space = 2)
+psi = TreeTensorNetwork(itn) # ITensorNetwork → TTN
+itn = ITensorNetwork(psi) # TTN → ITensorNetwork
+```
+
+```@docs; canonical=false
+ITensorNetworks.TreeTensorNetwork
+ITensorNetworks.ITensorNetwork(::ITensorNetworks.TreeTensorNetwork)
+```
+
+### From a dense `ITensor`
+
+A dense tensor can be decomposed into a TTN by successive QR/SVD factorisations along the
+tree edges. Truncation parameters (e.g. `cutoff`, `maxdim`) are forwarded to the
+factorisation step.
+
+```julia
+using ITensors: ITensors
+
+g = named_comb_tree((3, 1))
+sites = siteinds("S=1/2", g)
+A = ITensors.random_itensor(sites[(1, 1)], sites[(2, 1)], sites[(3, 1)])
+ttn_A = ttn(A, sites)
+```
+
+```@docs
+ITensorNetworks.ttn(::ITensors.ITensor, ::ITensorNetworks.IndsNetwork)
+```
+
+## Orthogonal Gauge
+
+One of the most powerful features of tree tensor networks is the ability to bring the
+network into an **orthogonal gauge** in linear time. When the network is in a gauge
+centred on vertex `v`, all tensors away from `v` are isometric with respect to the bond
+pointing toward `v`. This makes computing local observables, inner products, and
+eigenvalue problems numerically efficient and stable.
+
+The current orthogonality centre is tracked by the `ortho_region` field.
+
+```julia
+psi = orthogonalize(psi, v) # QR-sweep to put ortho centre at vertex v
+psi = orthogonalize(psi, [v1, v2]) # two-site centre (for nsites=2 sweeps)
+
+ortho_region(psi) # query the current ortho region (returns its vertices)
+psi = set_ortho_region(psi, vs) # update metadata only, no tensor operations
+```
+
+```@docs; canonical=false
+ITensorNetworks.orthogonalize
+ITensorNetworks.ortho_region
+ITensorNetworks.set_ortho_region
+```
+
+## Bond Truncation
+
+After algorithms that grow the bond dimension (e.g. addition, subspace expansion), use
+`truncate` to recompress the network. For `TreeTensorNetwork` there are two forms:
+
+- **Whole-network recompression** (TTN-specific): `truncate(ttn; kwargs...)` sweeps from
+  the leaves to the root, orthogonalising and truncating every bond in sequence. This is
+  the preferred form after addition or DMRG expansion.
+- **Single-bond truncation** (available for any `ITensorNetwork`): `truncate(tn, edge; + kwargs...)` truncates one bond by SVD — see the [Tensor Networks](@ref) page. + +```julia +psi = truncate(psi; cutoff = 1e-10, maxdim = 50) +``` + +The sweep-based form orthogonalises each bond before truncating it, so truncation errors +are controlled. All keyword arguments accepted by `ITensors.svd` (e.g. `cutoff`, `maxdim`, +`mindim`) are forwarded. + +```@docs; canonical=false +Base.truncate(::ITensorNetworks.AbstractTreeTensorNetwork) +``` + +## Addition and Arithmetic + +Two TTNs with the same graph and site indices can be summed. The result has bond +dimension equal to the **sum** of the two inputs, and can be recompressed with `truncate`. + +```julia +psi3 = psi1 + psi2 # or add(psi1, psi2) +psi3 = truncate(psi3; cutoff = 1e-10, maxdim = 50) + +2.0 * psi # scalar multiplication +psi / norm(psi) # manual normalisation +``` + +```@docs; canonical=false +ITensorNetworks.add(::ITensorNetworks.AbstractTreeTensorNetwork, ::ITensorNetworks.AbstractTreeTensorNetwork) +Base.:+(::ITensorNetworks.AbstractTreeTensorNetwork...) +``` diff --git a/src/abstractitensornetwork.jl b/src/abstractitensornetwork.jl index d8cbb6f3..76f09692 100644 --- a/src/abstractitensornetwork.jl +++ b/src/abstractitensornetwork.jl @@ -681,6 +681,24 @@ function _truncate_edge(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs.. return tn end +""" + truncate(tn::AbstractITensorNetwork, edge; kwargs...) -> ITensorNetwork + +Truncate the bond across `edge` in `tn` by performing an SVD and discarding small +singular values. `edge` may be an `AbstractEdge` or a `Pair` of vertices. + +Truncation parameters are passed as keyword arguments and forwarded to `ITensors.svd`: + + - `cutoff`: Drop singular values smaller than this threshold. + - `maxdim`: Maximum number of singular values to keep. + - `mindim`: Minimum number of singular values to keep. + +This operates on a single bond. For `TreeTensorNetwork`, the no-argument form +`truncate(ttn; kwargs...)` sweeps all bonds and is generally preferred for full +recompression after addition or subspace expansion. + +See also: `Base.truncate(::AbstractTreeTensorNetwork)`. +""" function Base.truncate(tn::AbstractITensorNetwork, edge::AbstractEdge; kwargs...) return _truncate_edge(tn, edge; kwargs...) end @@ -885,7 +903,17 @@ is_multi_edge(tn::AbstractITensorNetwork, e) = length(linkinds(tn, e)) > 1 is_multi_edge(tn::AbstractITensorNetwork) = Base.Fix1(is_multi_edge, tn) """ -Add two itensornetworks together by growing the bond dimension. The network structures need to be have the same vertex names, same site index on each vertex + add(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork) -> ITensorNetwork + +Add two `ITensorNetwork`s together by taking their direct sum (growing the bond dimension). +The result represents the state `tn1 + tn2`, with bond dimension on each edge equal to the +sum of the bond dimensions of `tn1` and `tn2`. + +Both networks must have the same vertex set and matching site indices at each vertex. + +Use `truncate` on the result to compress back to a lower bond dimension. + +See also: `Base.:+` for `TreeTensorNetwork`, `truncate`. 
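+
+# Example
+
+A minimal sketch (assuming `tn1` and `tn2` are states on the same graph with matching
+site indices, and `e` is an edge of that graph):
+
+```julia
+tn12 = add(tn1, tn2)     # bond dimension on each edge is the sum of the inputs
+tn12 = truncate(tn12, e) # optionally recompress the bond on edge `e`
+```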
""" function add(tn1::AbstractITensorNetwork, tn2::AbstractITensorNetwork) @assert issetequal(vertices(tn1), vertices(tn2)) diff --git a/src/expect.jl b/src/expect.jl index 8a743a98..c254d976 100644 --- a/src/expect.jl +++ b/src/expect.jl @@ -46,16 +46,67 @@ function expect(alg::Algorithm"exact", ψ::AbstractITensorNetwork, ops; kwargs.. return map(op -> expect(ψIψ, op; alg, kwargs...), ops) end +""" + expect(ψ::AbstractITensorNetwork, op::Op; alg="bp", kwargs...) -> Number + +Compute the expectation value ⟨ψ|op|ψ⟩ / ⟨ψ|ψ⟩ for a single `ITensors.Op` object. + +The default algorithm is belief propagation (`"bp"`); use `alg="exact"` for exact +contraction. + +See also: [`expect(ψ, op::String)`](@ref). +""" function expect(ψ::AbstractITensorNetwork, op::Op; alg = default_expect_alg(), kwargs...) return expect(Algorithm(alg), ψ, [op]; kwargs...) end +""" + expect(ψ::AbstractITensorNetwork, op::String, vertices; alg="bp", kwargs...) -> Dictionary + +Compute local expectation values ⟨ψ|op_v|ψ⟩ / ⟨ψ|ψ⟩ for the operator named `op` at each +vertex in `vertices`. + +See [`expect(ψ, op::String)`](@ref) for full documentation. +""" function expect( ψ::AbstractITensorNetwork, op::String, vertices; alg = default_expect_alg(), kwargs... ) return expect(Algorithm(alg), ψ, [Op(op, vertex) for vertex in vertices]; kwargs...) end +""" + expect(ψ::AbstractITensorNetwork, op::String; alg="bp", kwargs...) -> Dictionary + +Compute local expectation values ⟨ψ|op_v|ψ⟩ / ⟨ψ|ψ⟩ for the operator named `op` at every +vertex of `ψ`. + +# Arguments + + - `ψ`: The tensor network state. + - `op`: Name of the local operator (e.g. `"Sz"`, `"N"`, `"Sx"`), passed to `ITensors.op`. + - `alg="bp"`: Contraction algorithm. `"bp"` uses belief propagation (efficient for + loopy or large networks); `"exact"` performs full contraction. + +# Keyword Arguments (alg="bp" only) + + - `cache!`: Optional `Ref` to a pre-built belief propagation cache. If provided, + the cache is reused across multiple `expect` calls for efficiency. + - `update_cache=true`: Whether to update the cache before computing expectation values. + +# Returns + +A `Dictionary` mapping each vertex of `ψ` to its expectation value. + +# Example + +```julia +sz = expect(psi, "Sz") # all sites, belief propagation +sz = expect(psi, "Sz"; alg = "exact") # exact contraction +``` + +See also: [`expect(ψ, op::String, vertices)`](@ref), +[`expect(operator, state::AbstractTreeTensorNetwork)`](@ref). +""" function expect( ψ::AbstractITensorNetwork, op::String; diff --git a/src/inner.jl b/src/inner.jl index 95677cdd..3733de48 100644 --- a/src/inner.jl +++ b/src/inner.jl @@ -3,6 +3,18 @@ using LinearAlgebra: norm, norm_sqr default_contract_alg(tns::Tuple) = "bp" +""" + inner(ϕ::AbstractITensorNetwork, ψ::AbstractITensorNetwork; alg="bp", kwargs...) -> Number + +Compute the inner product ⟨ϕ|ψ⟩ by contracting the combined bra-ket network. + +# Keyword Arguments + + - `alg="bp"`: Contraction algorithm. `"bp"` uses belief propagation (default, efficient + for large or loopy networks); `"exact"` uses full contraction with an optimized sequence. + +See also: [`loginner`](@ref ITensorNetworks.loginner), `norm`, [`inner(ϕ, A, ψ)`](@ref ITensorNetworks.inner). +""" function ITensors.inner( ϕ::AbstractITensorNetwork, ψ::AbstractITensorNetwork; @@ -12,6 +24,17 @@ function ITensors.inner( return inner(Algorithm(alg), ϕ, ψ; kwargs...) end +""" + inner(ϕ::AbstractITensorNetwork, A::AbstractITensorNetwork, ψ::AbstractITensorNetwork; alg="bp", kwargs...) 
-> Number
+
+Compute the matrix element ⟨ϕ|A|ψ⟩ where `A` is a tensor network operator.
+
+# Keyword Arguments
+
+ - `alg="bp"`: Contraction algorithm. `"bp"` (default) or `"exact"`.
+
+See also: [`inner(ϕ, ψ)`](@ref).
+"""
 function ITensors.inner(
     ϕ::AbstractITensorNetwork,
     A::AbstractITensorNetwork,
@@ -53,6 +76,20 @@ function ITensors.inner(
     return scalar(tn; sequence)
 end
 
+"""
+    loginner(ϕ::AbstractITensorNetwork, ψ::AbstractITensorNetwork; alg="bp", kwargs...) -> Number
+
+Compute `log(⟨ϕ|ψ⟩)` in a numerically stable way by accumulating logarithms during
+contraction rather than computing the inner product directly.
+
+Useful when the inner product would overflow or underflow in floating-point arithmetic.
+
+# Keyword Arguments
+
+ - `alg="bp"`: Contraction algorithm, `"bp"` (default) or `"exact"`.
+
+See also: [`inner`](@ref ITensorNetworks.inner), `lognorm`.
+"""
 function loginner(
     ϕ::AbstractITensorNetwork,
     ψ::AbstractITensorNetwork;
diff --git a/src/itensornetwork.jl b/src/itensornetwork.jl
index a4c8e2cd..0e50cdd3 100644
--- a/src/itensornetwork.jl
+++ b/src/itensornetwork.jl
@@ -7,7 +7,71 @@ using NamedGraphs: NamedGraphs, NamedEdge, NamedGraph, vertextype
 struct Private end
 
 """
-    ITensorNetwork
+    ITensorNetwork{V}
+
+A tensor network where each vertex holds an `ITensor`. The network graph is a
+`NamedGraph{V}` and edges represent shared indices between neighboring tensors.
+
+# Constructors
+
+**From an `IndsNetwork` (most common):**
+
+```julia
+ITensorNetwork(is::IndsNetwork; link_space = 1)
+ITensorNetwork(f, is::IndsNetwork; link_space = 1)
+ITensorNetwork(eltype, undef, is::IndsNetwork; link_space = 1)
+```
+
+ - With no function argument `f`, tensors are initialized to zero.
+ - With a function `f(v)` that returns a state label (e.g. `"Up"`, `"Dn"`) or
+   an `ITensor` constructor, tensors are initialized accordingly.
+ - `link_space` controls the bond-index dimension (default 1).
+
+**From a graph (site indices inferred as trivial):**
+
+```julia
+ITensorNetwork(graph::AbstractNamedGraph; link_space = 1)
+ITensorNetwork(f, graph::AbstractNamedGraph; link_space = 1)
+```
+
+**From a collection of `ITensor`s:**
+
+```julia
+ITensorNetwork(ts::AbstractVector{ITensor})
+ITensorNetwork(vs, ts::AbstractVector{ITensor})
+ITensorNetwork(ts::AbstractVector{<:Pair{<:Any, ITensor}})
+ITensorNetwork(ts::AbstractDict{<:Any, ITensor})
+```
+
+Edges are inferred from shared indices between tensors.
+
+**From a single `ITensor`:**
+
+```julia
+ITensorNetwork(t::ITensor)
+```
+
+Wraps the tensor in a single-vertex network.
+
+# Example
+
+```julia
+using ITensorNetworks, ITensors, NamedGraphs.NamedGraphGenerators
+
+g = named_grid((4,))
+s = siteinds("S=1/2", g)
+
+# Zero-initialized network with bond dimension 2
+tn = ITensorNetwork(s; link_space = 2)
+
+# Product state initialized to "Up" on every site
+tn = ITensorNetwork("Up", s)
+
+# Random network (using the `random_tensornetwork` helper)
+tn = random_tensornetwork(s; link_space = 4)
+```
+
+See also: `IndsNetwork`, [`ttn`](@ref ITensorNetworks.ttn), [`TreeTensorNetwork`](@ref ITensorNetworks.TreeTensorNetwork).
+"""
 struct ITensorNetwork{V} <: AbstractITensorNetwork{V}
     data_graph::DataGraph{V, ITensor, ITensor, NamedGraph{V}, NamedEdge{V}}
diff --git a/src/normalize.jl b/src/normalize.jl
index 79cfadce..d0c1bc22 100644
--- a/src/normalize.jl
+++ b/src/normalize.jl
@@ -35,6 +35,27 @@ function rescale(
     return tensornetwork(cache![])
 end
 
+"""
+    normalize(tn::AbstractITensorNetwork; alg="exact", kwargs...)
-> AbstractITensorNetwork
+
+Return a copy of `tn` rescaled so that `norm(tn) ≈ 1`.
+
+The rescaling is distributed evenly across all tensors in the network (each tensor is
+multiplied by the same scalar factor).
+
+# Keyword Arguments
+
+ - `alg="exact"`: Normalization algorithm. `"exact"` contracts ⟨ψ|ψ⟩ exactly;
+   `"bp"` uses belief propagation for large networks.
+
+# Example
+
+```julia
+psi = normalize(psi)
+```
+
+See also: `norm`, [`inner`](@ref ITensorNetworks.inner).
+"""
 function LinearAlgebra.normalize(tn::AbstractITensorNetwork; alg = "exact", kwargs...)
     return normalize(Algorithm(alg), tn; kwargs...)
 end
diff --git a/src/solvers/applyexp.jl b/src/solvers/applyexp.jl
index f4adab51..1f1be0af 100644
--- a/src/solvers/applyexp.jl
+++ b/src/solvers/applyexp.jl
@@ -113,6 +113,46 @@ end
 
 process_real_times(z) = iszero(abs(z)) ? 0.0 : round(-imag(z); digits = 10)
 
+"""
+    time_evolve(operator, time_points, init_state; sweep_kwargs...) -> state
+
+Time-evolve `init_state` under `operator` using the Time-Dependent Variational Principle
+(TDVP) algorithm.
+
+The state is evolved from `t=0` through the successive time points in `time_points`.
+The operator should represent the Hamiltonian `H`;
+internally the evolution `exp(-i H t)` is applied via a "local solver".
+
+# Arguments
+
+ - `operator`: The Hamiltonian as a tensor network operator (e.g. built from an `OpSum`).
+ - `time_points`: A vector (or range) of time values. Can be real or complex.
+ - `init_state`: The initial tensor network state.
+
+# Keyword Arguments
+
+ - `nsites=2`: Number of sites optimized per local update (1 or 2).
+ - `order=4`: Order of the TDVP sweep pattern and time step increments.
+ - `factorize_kwargs`: Keyword arguments for bond truncation, e.g. `(; cutoff=1e-10, maxdim=50)`.
+ - `outputlevel=0`: Verbosity level (0 = silent, 1 = print after each time step).
+ - `solver_kwargs`: Additional keyword arguments forwarded to the local solver (time stepping algorithm).
+
+# Returns
+
+The evolved state at `last(time_points)`.
+
+# Example
+
+```julia
+times = 0.1:0.1:1.0
+psi_t = time_evolve(H, times, psi0;
+    nsites = 2,
+    order = 4,
+    factorize_kwargs = (; cutoff = 1e-10, maxdim = 50),
+    outputlevel = 1
+)
+```
+"""
 function time_evolve(
     operator,
     time_points,
diff --git a/src/solvers/eigsolve.jl b/src/solvers/eigsolve.jl
index f5a0c8e8..a4d489a3 100644
--- a/src/solvers/eigsolve.jl
+++ b/src/solvers/eigsolve.jl
@@ -61,6 +61,41 @@ function default_sweep_callback(
     end
 end
 
+"""
+    eigsolve(operator, init_state; nsweeps, nsites=1, factorize_kwargs, sweep_kwargs...) -> (eigenvalue, state)
+
+Find the lowest eigenvalue and corresponding eigenvector of `operator` using a
+DMRG-like sweep algorithm on a `TreeTensorNetwork`.
+
+# Arguments
+
+ - `operator`: The operator to diagonalize, typically a `TreeTensorNetwork` representing a
+   Hamiltonian constructed from an `OpSum` (e.g. via `ttn(opsum, sites)`).
+ - `init_state`: Initial guess for the eigenvector as a `TreeTensorNetwork`.
+
+# Keyword Arguments
+
+ - `nsweeps`: Number of sweeps over the network.
+ - `nsites=1`: Number of sites optimized simultaneously per local update step (1 or 2).
+ - `factorize_kwargs`: Keyword arguments controlling bond truncation after each local solve,
+   e.g. `(; cutoff=1e-10, maxdim=50)`.
+ - `outputlevel=0`: Level of output to print (0 = no output, 1 = sweep-level information, 2 = step details).
+
+# Returns
+
+A tuple `(eigenvalue, state)` where `eigenvalue` is the converged lowest eigenvalue and
+`state` is the optimized `TreeTensorNetwork` eigenvector.
+ +# Example + +```julia +energy, psi = eigsolve(H, psi0; + nsweeps = 10, + nsites = 2, + factorize_kwargs = (; cutoff = 1e-10, maxdim = 50), + outputlevel = 1 +) +``` + +See also: [`dmrg`](@ref), [`time_evolve`](@ref). +""" function eigsolve( operator, init_state; nsweeps, nsites = 1, factorize_kwargs, sweep_kwargs... ) @@ -79,4 +114,22 @@ function eigsolve( return eigenvalue(prob), state(prob) end +""" + dmrg(operator, init_state; kwargs...) -> (eigenvalue, state) + +Find the lowest eigenvalue and eigenvector of `operator` using the Density Matrix +Renormalization Group (DMRG) algorithm. This is an alias for [`eigsolve`](@ref). + +See [`eigsolve`](@ref) for the full description of arguments and keyword arguments. + +# Example + +```julia +energy, psi = dmrg(H, psi0; + nsweeps = 10, + nsites = 2, + factorize_kwargs = (; cutoff = 1e-10, maxdim = 50) +) +``` +""" dmrg(operator, init_state; kwargs...) = eigsolve(operator, init_state; kwargs...) diff --git a/src/treetensornetworks/abstracttreetensornetwork.jl b/src/treetensornetworks/abstracttreetensornetwork.jl index 18922658..d6d90dcd 100644 --- a/src/treetensornetworks/abstracttreetensornetwork.jl +++ b/src/treetensornetworks/abstracttreetensornetwork.jl @@ -38,6 +38,25 @@ function gauge(alg::Algorithm, ttn::AbstractTTN, region; kwargs...) return gauge(alg, ttn, [region]; kwargs...) end +""" + orthogonalize(ttn::AbstractTreeTensorNetwork, region; kwargs...) -> TreeTensorNetwork + +Bring `ttn` into an orthogonal gauge with orthogonality center at `region`. +`region` may be a single vertex or a vector of vertices. + +QR decompositions are applied along the unique tree path from the current +`ortho_region` to `region`, so that all tensors outside `region` are left- or +right-orthogonal with respect to that path. + +# Example + +```julia +psi = orthogonalize(psi, root_vertex) # single-site orthogonality center +psi = orthogonalize(psi, [v1, v2]) # two-site orthogonality center +``` + +See also: [`set_ortho_region`](@ref), [`ortho_region`](@ref), [`truncate`](@ref). +""" function orthogonalize(ttn::AbstractTTN, region; kwargs...) return gauge(Algorithm("orthogonalize"), ttn, region; kwargs...) end @@ -50,6 +69,30 @@ end # Truncation # +""" + truncate(tn::AbstractTreeTensorNetwork; root_vertex=..., kwargs...) -> TreeTensorNetwork + +Truncate the bond dimensions of `tn` by sweeping from the leaves toward `root_vertex` +and performing an SVD-based truncation on each bond. + +Before truncating each bond the relevant subtree is first orthogonalized (controlled +truncation), ensuring that discarded singular values correspond to actual truncation error. +Truncation parameters are passed through `kwargs`. + +# Keyword Arguments + + - `root_vertex`: Root of the DFS traversal. Defaults to `default_root_vertex(tn)`. + - `cutoff`: Drop singular values smaller than this threshold (relative or absolute). + - `maxdim`: Maximum number of singular values to retain on each bond. + +# Example + +```julia +psi_trunc = truncate(psi; cutoff = 1e-10, maxdim = 50) +``` + +See also: [`orthogonalize`](@ref). +""" function Base.truncate( tn::AbstractTTN; root_vertex = GraphsExtensions.default_root_vertex(tn), kwargs... ) @@ -87,6 +130,16 @@ function NDTensors.contract( # return tn[root_vertex] end +""" + inner(x::AbstractTreeTensorNetwork, y::AbstractTreeTensorNetwork) -> Number + +Compute the inner product ⟨x|y⟩ by contracting the bra-ket network using a +post-order DFS traversal rooted at `root_vertex`. 
+ +Both networks must have the same graph structure and compatible site indices. + +See also: [`loginner`](@ref ITensorNetworks.loginner), `norm`, [`inner(y, A, x)`](@ref ITensorNetworks.inner). +""" function ITensors.inner( x::AbstractTTN, y::AbstractTTN; root_vertex = GraphsExtensions.default_root_vertex(x) ) @@ -214,17 +267,6 @@ function _add_maxlinkdims(tns::AbstractTTN...) return maxdims end -# TODO: actually implement this? -function Base.:+( - ::Algorithm"densitymatrix", - tns::AbstractTTN...; - cutoff = 1.0e-15, - root_vertex = GraphsExtensions.default_root_vertex(first(tns)), - kwargs... - ) - return error("Not implemented (yet) for trees.") -end - function Base.:+( ::Algorithm"directsum", tns::AbstractTTN...; @@ -232,6 +274,15 @@ function Base.:+( ) @assert all(tn -> nv(first(tns)) == nv(tn), tns) + # For QN-conserving TN's, directsum strategy requires each + # tensor to have the same flux, which orthogonalizing + # to the same 'center' vertex ensures (assuming TN's have + # the same total flux) + tns = collect(tns) + for j in 1:length(tns) + tns[j] = orthogonalize(tns[j], root_vertex) + end + # Output state tn = ttn(siteinds(tns[1])) @@ -259,12 +310,37 @@ function Base.:+( end # TODO: switch default algorithm once more are implemented +""" + +(tn1::AbstractTreeTensorNetwork, tn2::AbstractTreeTensorNetwork; alg="directsum", kwargs...) -> TreeTensorNetwork + +Add two tree tensor networks by growing the bond dimension, returning a network that +represents the state `tn1 + tn2`. The bond dimension of the result is the sum of the +bond dimensions of the two inputs. + +Use [`truncate`](@ref) afterward to compress the resulting network. + +# Keyword Arguments + + - `alg="directsum"`: Algorithm for combining the networks. Currently only `"directsum"` is + supported for trees. + +Both networks must share the same graph structure and site indices. + +See also: [`add`](@ref), [`truncate`](@ref). +""" function Base.:+(tns::AbstractTTN...; alg = Algorithm"directsum"(), kwargs...) return +(Algorithm(alg), tns...; kwargs...) end Base.:+(tn::AbstractTTN) = tn +""" + add(tns::AbstractTreeTensorNetwork...; kwargs...) -> TreeTensorNetwork + +Add tree tensor networks together by growing the bond dimension. Equivalent to `+(tns...)`. + +See also: [`+(tns...)`](@ref), [`truncate`](@ref). +""" add(tns::AbstractTTN...; kwargs...) = +(tns...; kwargs...) function Base.:-(tn1::AbstractTTN, tn2::AbstractTTN; kwargs...) @@ -293,6 +369,16 @@ end # Inner products # +""" + inner(y::AbstractTreeTensorNetwork, A::AbstractTreeTensorNetwork, x::AbstractTreeTensorNetwork) -> Number + +Compute the matrix element ⟨y|A|x⟩ where `A` is a tree tensor network operator. +The contraction proceeds via a post-order DFS traversal. + +All three networks must have compatible graph structure and indices. + +See also: [`inner(x, y)`](@ref). +""" # TODO: implement using multi-graph disjoint union function ITensors.inner( y::AbstractTTN, @@ -341,6 +427,36 @@ function ITensors.inner( return O[] end +""" + expect(operator::String, state::AbstractTreeTensorNetwork; vertices=vertices(state), root_vertex=...) -> Dictionary + +Compute local expectation values ⟨state|op_v|state⟩ / ⟨state|state⟩ for each vertex `v` +in `vertices` using exact contraction via successive orthogonalization. + +The state is normalized before computing expectation values. The operator name is passed to +`ITensors.op`; each vertex must carry exactly one site index. + +# Arguments + + - `operator`: Name of the local operator, e.g. `"Sz"`, `"N"`, `"Sx"`. 
+ - `state`: The tree tensor network state. + - `vertices`: Subset of vertices at which to evaluate the operator. Defaults to all vertices. + - `root_vertex`: Root used for the DFS traversal order. + +# Returns + +A `Dictionary` mapping each vertex to its (real-typed) expectation value. + +# Example + +```julia +sz = expect("Sz", psi) +sz_sub = expect("Sz", psi; vertices = [1, 3, 5]) +``` + +See also: [`expect(ψ, op::String)`](@ref) for general `ITensorNetwork` states with belief +propagation support. +""" function expect( operator::String, state::AbstractTTN; diff --git a/src/treetensornetworks/treetensornetwork.jl b/src/treetensornetworks/treetensornetwork.jl index 968dab9c..e9754f51 100644 --- a/src/treetensornetworks/treetensornetwork.jl +++ b/src/treetensornetworks/treetensornetwork.jl @@ -6,6 +6,17 @@ using NamedGraphs.GraphsExtensions: GraphsExtensions, vertextype """ TreeTensorNetwork{V} <: AbstractTreeTensorNetwork{V} + +A tensor network whose underlying graph is a tree. In addition to the tensor data, +it tracks an `ortho_region`: the set of vertices that currently form the orthogonality +center of the network. + +`TTN` is an alias for `TreeTensorNetwork`. + +Use [`ttn`](@ref) or [`mps`](@ref) to construct instances, and [`orthogonalize`](@ref) to +bring the network into a canonical gauge. + +See also: [`ITensorNetwork`](@ref), [`ttn`](@ref), [`mps`](@ref), [`random_ttn`](@ref). """ struct TreeTensorNetwork{V} <: AbstractTreeTensorNetwork{V} tensornetwork::ITensorNetwork{V} @@ -24,6 +35,25 @@ function _TreeTensorNetwork(tensornetwork::ITensorNetwork) return _TreeTensorNetwork(tensornetwork, vertices(tensornetwork)) end +""" + TreeTensorNetwork(tn::ITensorNetwork; ortho_region=vertices(tn)) -> TreeTensorNetwork + +Construct a `TreeTensorNetwork` from an `ITensorNetwork` with tree graph structure. + +The `ortho_region` keyword specifies which vertices currently form the orthogonality center. +By default all vertices are included, meaning no particular gauge is assumed. To enforce an +actual orthogonal gauge, call [`orthogonalize`](@ref) afterward. + +Throws an error if the underlying graph of `tn` is not a tree. + +# Example + +```julia +ttn_state = TreeTensorNetwork(itn; ortho_region = [root_vertex]) +``` + +See also: [`ttn`](@ref), [`ITensorNetwork`](@ref), [`orthogonalize`](@ref). +""" function TreeTensorNetwork(tn::ITensorNetwork; ortho_region = vertices(tn)) return _TreeTensorNetwork(tn, ortho_region) end @@ -34,7 +64,23 @@ end const TTN = TreeTensorNetwork # Field access +""" + ITensorNetwork(tn::TreeTensorNetwork) -> ITensorNetwork + +Convert a `TreeTensorNetwork` to a plain `ITensorNetwork`, discarding orthogonality +metadata. The returned network shares the same underlying tensor data. + +See also: [`TreeTensorNetwork`](@ref), [`ttn`](@ref). +""" ITensorNetwork(tn::TTN) = getfield(tn, :tensornetwork) + +""" + ortho_region(tn::TreeTensorNetwork) -> Indices + +Return the set of vertices that currently form the orthogonality center of `tn`. + +See also: [`orthogonalize`](@ref), [`set_ortho_region`](@ref). +""" ortho_region(tn::TTN) = getfield(tn, :ortho_region) # Required for `AbstractITensorNetwork` interface @@ -52,10 +98,43 @@ end # Constructor # +""" + set_ortho_region(tn::TreeTensorNetwork, ortho_region) -> TreeTensorNetwork + +Return a copy of `tn` with the `ortho_region` metadata updated to `ortho_region`, +**without** performing any gauge transformations on the tensors. + +This is a low-level bookkeeping update. 
To actually move the orthogonality center by
+applying QR decompositions along the tree, use [`orthogonalize`](@ref).
+
+See also: [`ortho_region`](@ref), [`orthogonalize`](@ref).
+"""
 function set_ortho_region(tn::TTN, ortho_region)
     return ttn(ITensorNetwork(tn); ortho_region)
 end
 
+"""
+    ttn(args...; ortho_region=nothing) -> TreeTensorNetwork
+
+Construct a `TreeTensorNetwork` (TTN) using the same interface as [`ITensorNetwork`](@ref).
+All positional and keyword arguments are forwarded to the `ITensorNetwork` constructor.
+
+If `ortho_region` is not specified, all vertices are set as the orthogonal region
+(i.e. no particular gauge is assumed). Call [`orthogonalize`](@ref) to impose a gauge.
+
+# Example
+
+```julia
+using ITensorNetworks, NamedGraphs.NamedGraphGenerators
+
+# Comb-tree TTN in the "Up" product state, with bond dimension 2
+g = named_comb_tree((3, 4))
+s = siteinds("S=1/2", g)
+psi = ttn(v -> "Up", s; link_space = 2)
+```
+
+See also: [`mps`](@ref), [`random_ttn`](@ref), [`TreeTensorNetwork`](@ref).
+"""
 function ttn(args...; ortho_region = nothing)
     tn = ITensorNetwork(args...)
     if isnothing(ortho_region)
@@ -64,6 +143,14 @@ end
 
+"""
+    mps(args...; ortho_region=nothing) -> TreeTensorNetwork
+
+Construct a matrix product state (MPS) as a `TreeTensorNetwork` on a 1D path graph.
+The interface is identical to [`ttn`](@ref) but is intended for 1D (chain) topologies.
+
+See also: [`ttn`](@ref), [`random_mps`](@ref).
+"""
 function mps(args...; ortho_region = nothing)
     # TODO: Check it is a path graph.
     tn = ITensorNetwork(args...)
@@ -73,10 +160,41 @@ end
 
+"""
+    mps(f, is::Vector{<:Index}; kwargs...) -> TreeTensorNetwork
+
+Construct a matrix product state (MPS) from a function `f` and a flat vector of site
+indices `is`. The indices are arranged on a 1D path graph automatically.
+
+# Example
+
+```julia
+s = siteinds("S=1/2", 10)
+psi = mps(v -> "Up", s)
+```
+"""
 function mps(f, is::Vector{<:Index}; kwargs...)
     return mps(f, path_indsnetwork(is); kwargs...)
 end
 
+"""
+    ttn(a::ITensor, is::IndsNetwork; ortho_region=..., kwargs...) -> TreeTensorNetwork
+
+Decompose a dense `ITensor` `a` into a `TreeTensorNetwork` with the tree structure
+described by the `IndsNetwork` `is`.
+
+Successive QR/SVD factorizations are applied following a post-order DFS traversal from the
+root vertex, then the network is orthogonalized to `ortho_region` (defaults to the root).
+Extra `kwargs` (e.g. `cutoff`, `maxdim`) are forwarded to the factorization.
+
+# Example
+
+```julia
+using ITensors: random_itensor
+using NamedGraphs.NamedGraphGenerators: named_comb_tree
+
+g = named_comb_tree((3, 1))
+s = siteinds("S=1/2", g)
+A = random_itensor(s[(1, 1)], s[(2, 1)], s[(3, 1)])
+ttn_A = ttn(A, s)
+```
+"""
 # Construct from dense ITensor, using IndsNetwork of site indices.
 function ttn(
         a::ITensor,
@@ -102,20 +220,61 @@ function ttn(
     return orthogonalize(ttn_a, ortho_center)
 end
 
+"""
+    random_ttn(args...; kwargs...) -> TreeTensorNetwork
+
+Construct a random, unit-norm `TreeTensorNetwork`. Arguments are forwarded to
+`random_tensornetwork`, which accepts the same interface as [`ITensorNetwork`](@ref).
+
+# Example
+
+```julia
+g = named_comb_tree((3, 4))
+s = siteinds("S=1/2", g)
+psi = random_ttn(s; link_space = 4)
+```
+
+See also: [`ttn`](@ref), [`random_mps`](@ref).
+"""
 function random_ttn(args...; kwargs...)
     # TODO: Check it is a tree graph.
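+    # `random_tensornetwork` fills each tensor with random entries; `normalize`
+    # then rescales the resulting state to unit norm.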
return normalize(_TreeTensorNetwork(random_tensornetwork(args...; kwargs...))) end +""" + random_mps(args...; kwargs...) -> TreeTensorNetwork + +Construct a random, unit-norm matrix product state (MPS) as a `TreeTensorNetwork`. +Arguments are forwarded to [`random_ttn`](@ref). + +# Example + +```julia +s = siteinds("S=1/2", 10) +psi = random_mps(s; link_space = 4) +``` + +See also: [`mps`](@ref), [`random_ttn`](@ref). +""" function random_mps(args...; kwargs...) # TODO: Check it is a path graph. return random_ttn(args...; kwargs...) end +""" + random_mps(f, is::Vector{<:Index}; kwargs...) -> TreeTensorNetwork + +Construct a random MPS from a function `f` and a flat vector of site indices `is`. +""" function random_mps(f, is::Vector{<:Index}; kwargs...) return random_mps(f, path_indsnetwork(is); kwargs...) end +""" + random_mps(s::Vector{<:Index}; kwargs...) -> TreeTensorNetwork + +Construct a random MPS from a flat vector of site indices `s`. +""" function random_mps(s::Vector{<:Index}; kwargs...) return random_mps(path_indsnetwork(s); kwargs...) end diff --git a/test/test_additensornetworks.jl b/test/test_additensornetworks.jl index 7f23c67e..02e1be9c 100644 --- a/test/test_additensornetworks.jl +++ b/test/test_additensornetworks.jl @@ -1,9 +1,10 @@ @eval module $(gensym()) using Graphs: rem_edge!, vertices -using ITensorNetworks: ITensorNetwork, inner_network, random_tensornetwork, siteinds +using ITensorNetworks: ITensorNetwork, inner_network, orthogonalize, random_tensornetwork, + siteinds, truncate, ttn using ITensors: ITensors, apply, inner, op, scalar using LinearAlgebra: norm_sqr -using NamedGraphs.NamedGraphGenerators: named_grid +using NamedGraphs.NamedGraphGenerators: named_comb_tree, named_grid using NamedGraphs: NamedEdge using StableRNGs: StableRNG using TensorOperations: TensorOperations @@ -54,4 +55,35 @@ using Test: @test, @testset @test expec_method1 ≈ expec_method2 end + +# +# This test is a regression test for an +# issue where summing two product states +# results in incorrect fluxes of the +# output state's tensors +# +@testset "Sum Product States" begin + g = named_comb_tree((2, 2)) + sites = siteinds("S=1/2", g; conserve_qns = true) + + verts = collect(vertices(g)) + + state1 = Dict{Tuple, String}() + for (j, v) in enumerate(verts) + state1[v] = isodd(j) ? "Up" : "Dn" + end + ψ1 = ttn(state1, sites) + + state2 = Dict{Tuple, String}() + for (j, v) in enumerate(vertices(g)) + state2[v] = isodd(j) ? "Dn" : "Up" + end + ψ2 = ttn(state2, sites) + + ϕ = ψ1 + ψ2 + + for v in vertices(g) + @test ITensors.allfluxequal(ITensors.tensor(ϕ[v])) + end +end end