Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 1 addition & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,7 @@ __pycache__
.ipynb*
Manifest.toml
.vscode
benchmark
experimental
refs
*.mathcha

/benchmark/results/
4 changes: 4 additions & 0 deletions benchmark/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0"
TensorKit = "07d1fe3e-3e46-537d-9eac-e9e13d0d4cec"
31 changes: 31 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
# TensorKit Benchmarks

This directory contains a benchmark suite for TensorKit.
Most of the benchmarks are designed to capture performance characteristics of the library, and are not intended to be used as a benchmark suite for comparing different libraries.
In particular, the main goal here is to catch performance regressions and/or improvements between different versions of TensorKit.

## Running the benchmarks

The benchmarks are written using `BenchmarkTools.jl`, and the full suite can be found in the `SUITE` global variable defined in `benchmarks.jl`.
Sometimes, it is useful to run only a subset of the benchmarks.
To do this, you can use the `--modules` flag to specify which modules to run.
Alternatively, you can use the `TensorKitBenchmarks` module directly, which is designed after `BaseBenchmarks` to allow for conditional loading of the benchmarks.

For a more streamlined CLI experience, you can use [`AirspeedVelocity.jl`](https://github.com/MilesCranmer/AirspeedVelocity.jl) to run the benchmarks.
The following command will run the benchmarks and compare with the current master branch:

```bash
benchpkg TensorKit \
--rev=dirty,master \
-o benchmark/results/ \
    --exeflags="--threads=4"
```

To compare with previous results, the following command can be used:

```bash
benchpkgtable TensorKit \
--rev=dirty,master \
-i benchmark/results/ \
    -o benchmark/results/
```
55 changes: 55 additions & 0 deletions benchmark/TensorKitBenchmarks/TensorKitBenchmarks.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
module TensorKitBenchmarks
# Entry point for the TensorKit benchmark suite.
#
# Benchmarks live in submodules (one per `MODULES` entry), each of which defines
# its own `SUITE::BenchmarkGroup`. Submodules are loaded lazily via `load!` /
# `loadall!`, following the design of BaseBenchmarks.jl.

using BenchmarkTools
using TensorKit
using TOML

# Global benchmark parameters applied to every group in the suite.
BenchmarkTools.DEFAULT_PARAMETERS.seconds = 1.0
BenchmarkTools.DEFAULT_PARAMETERS.samples = 10000
BenchmarkTools.DEFAULT_PARAMETERS.time_tolerance = 0.15
BenchmarkTools.DEFAULT_PARAMETERS.memory_tolerance = 0.01

# Location of previously saved benchmark parameters (see `BenchmarkTools.save`).
const PARAMS_PATH = joinpath(@__DIR__, "etc", "params.json")
const SUITE = BenchmarkGroup()
# Maps a benchmark group id to the submodule implementing it; the submodule is
# expected at `<id>/<ModuleName>.jl` relative to this file.
const MODULES = Dict("linalg" => :LinalgBenchmarks,
                     "indexmanipulations" => :IndexManipulationBenchmarks,
                     "tensornetworks" => :TensorNetworkBenchmarks)

# Read the stored parameter results from `PARAMS_PATH`.
# `BenchmarkTools.load` returns a vector of stored objects; the parameters are
# the first (and only) entry.
read_params() = BenchmarkTools.load(PARAMS_PATH)[1]

# Apply stored evaluation parameters for group `id` to `suite`, if any are
# present in `results`. Shared by `load!` and `loadall!` so the tuning logic
# lives in one place.
function tune_from_params!(suite, id, results=read_params())
    haskey(results, id) && loadparams!(suite, results[id], :evals)
    return suite
end

"""
    load!([group::BenchmarkGroup], id::AbstractString; tune::Bool=false)

Include the benchmark submodule registered under `id` in `MODULES`, and insert
its `SUITE` into `group` (defaults to the global `SUITE`). If `tune` is `true`,
previously stored evaluation parameters from `PARAMS_PATH` are applied.

Returns `group`. Throws a `KeyError` if `id` is not a key of `MODULES`.
"""
load!(id::AbstractString; kwargs...) = load!(SUITE, id; kwargs...)

function load!(group::BenchmarkGroup, id::AbstractString; tune::Bool=false)
    modsym = MODULES[id]
    modpath = joinpath(@__DIR__, id, "$(modsym).jl")
    # Evaluate the submodule at the top level of this module; `@invokelatest`
    # avoids world-age issues when accessing the freshly defined module.
    Base.include(@__MODULE__, modpath)
    mod = Core.eval(@__MODULE__, modsym)
    modsuite = @invokelatest getglobal(mod, :SUITE)
    group[id] = modsuite
    tune && tune_from_params!(modsuite, id)
    return group
end

"""
    loadall!([group::BenchmarkGroup]; verbose::Bool=true, tune::Bool=false)

Load every benchmark submodule listed in `MODULES` into `group` (defaults to the
global `SUITE`). With `verbose=true`, print a timing line per group. With
`tune=true`, apply stored evaluation parameters from `PARAMS_PATH` afterwards.

Returns `group`.
"""
loadall!(; kwargs...) = loadall!(SUITE; kwargs...)

function loadall!(group::BenchmarkGroup; verbose::Bool=true, tune::Bool=false)
    for id in keys(MODULES)
        # Tuning is deliberately disabled per-group here and deferred until all
        # groups are loaded, so that the parameters file is read only once.
        if verbose
            print("loading group $(repr(id))... ")
            time = @elapsed load!(group, id; tune=false)
            println("done (took $time seconds)")
        else
            load!(group, id; tune=false)
        end
    end
    if tune
        results = read_params()
        for (id, suite) in group
            tune_from_params!(suite, id, results)
        end
    end
    return group
end

end
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
module IndexManipulationBenchmarks
# Benchmarks for index-manipulation operations on TensorKit tensors
# (currently `permute!`; `transpose!` is still TODO).
#
# Benchmark parameters are read from the adjacent `benchparams.toml`; each
# `[[permute]]` table expands (via `expand_kwargs` from BenchUtils) into one
# registered benchmark per combination of parameter values.

include(joinpath(@__DIR__, "..", "utils", "BenchUtils.jl"))

using .BenchUtils
using BenchmarkTools
using TensorKit
using TOML

const SUITE = BenchmarkGroup()
const all_parameters = TOML.parsefile(joinpath(@__DIR__, "benchparams.toml"))

# permute!
# --------

# Allocate the source tensor A on homomorphism space `W` and the destination C
# on the `p`-permuted space, so `permute!(C, A, p)` can run in-place.
function init_permute_tensors(T, W, p)
    C = randn(T, permute(W, p))
    A = randn(T, W)
    return C, A
end
# Expand one TOML parameter table into individual `permute` benchmarks,
# registered under the "permute" subgroup of `benchgroup`.
# NOTE(review): the module-level driver below already nests this inside
# SUITE["permute"], so entries end up at SUITE["permute"]["permute"][...] —
# confirm the double nesting is intended (LinalgBenchmarks follows the same
# pattern).
function benchmark_permute!(benchgroup, params::Dict)
    haskey(benchgroup, "permute") || addgroup!(benchgroup, "permute")
    bench = benchgroup["permute"]
    for kwargs in expand_kwargs(params)
        benchmark_permute!(bench; kwargs...)
    end
    return nothing
end
# Register a single `permute!` benchmark.
# - `T` / `I`: scalar type and sector (symmetry) type, given as strings and
#   parsed into types here via `parse_type`.
# - `dims`: one dimension per index, used to generate the vector spaces.
# - `sigmas`: extra per-space parameters forwarded to `generate_space`
#   (presumably sector-weight distributions — see BenchUtils; `nothing`
#   broadcasts as a scalar).
# - `p`: the permutation as a pair of index lists (codomain, domain).
function benchmark_permute!(bench; sigmas=nothing, T="Float64", I="Trivial", dims, p)
    T_ = parse_type(T)
    I_ = parse_type(I)

    # Convert the TOML arrays-of-arrays into the tuple-of-tuples form that
    # TensorKit's permute expects.
    p_ = (Tuple(p[1]), Tuple(p[2]))
    Vs = generate_space.(I_, dims, sigmas)

    # Assemble codomain/domain of the source tensor from the spaces selected by
    # `p_`, starting from the unit space so empty index lists are handled.
    # NOTE(review): `p_` is applied again inside `init_permute_tensors` (via
    # `permute(W, p)`); confirm this composition of the permutation with the
    # space selection is intentional.
    codomain = mapreduce(Base.Fix1(getindex, Vs), ⊗, p_[1]; init=one(eltype(Vs)))
    domain = mapreduce(Base.Fix1(getindex, Vs), ⊗, p_[2]; init=one(eltype(Vs)))
    init() = init_permute_tensors(T_, codomain ← domain, p_)

    # Fresh tensors are allocated in `setup` for every sample so in-place
    # mutation does not leak between runs.
    bench[T, I, dims, sigmas, p] = @benchmarkable permute!(C, A, $p_) setup = ((C, A) = $init())
    return nothing
end

if haskey(all_parameters, "permute")
    g = addgroup!(SUITE, "permute")
    for params in all_parameters["permute"]
        benchmark_permute!(g, params)
    end
end

# transpose!
# ----------

# TODO

end
12 changes: 12 additions & 0 deletions benchmark/TensorKitBenchmarks/indexmanipulations/benchparams.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
[[permute]]
T = ["Float64"]
I = "Trivial"
p = [[[2, 1], []]]
dims = [[7264, 7264], [43408, 1216]]

[[permute]]
T = ["Float64"]
I = "Z2Irrep"
p = [[[2, 1], []]]
dims = [[7264, 7264], [43408, 1216]]
sigmas = [[0.5, 0.5]]
81 changes: 81 additions & 0 deletions benchmark/TensorKitBenchmarks/linalg/LinalgBenchmarks.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
module LinalgBenchmarks
# Benchmarks for linear-algebra primitives on TensorKit tensors: in-place
# matrix multiplication (`mul!`) and singular value decomposition (`tsvd!`).
# Parameters come from the adjacent `benchparams.toml`; every table entry is
# expanded into one benchmark per parameter combination.

include(joinpath(@__DIR__, "..", "utils", "BenchUtils.jl"))

using .BenchUtils
using BenchmarkTools
using TensorKit
using TOML

const SUITE = BenchmarkGroup()
const all_parameters = TOML.parsefile(joinpath(@__DIR__, "benchparams.toml"))

# mul!
# ----

# Build the three tensors for C = A * B, with spaces V[1] ← V[2], V[2] ← V[3]
# and V[1] ← V[3] respectively.
function init_mul_tensors(T, V)
    return (randn(T, V[1] ← V[2]),   # A
            randn(T, V[2] ← V[3]),   # B
            randn(T, V[1] ← V[3]))   # C
end

# Expand one TOML parameter table into individual benchmarks under the
# "mul" subgroup of `benchgroup`.
function benchmark_mul!(benchgroup, params::Dict)
    if !haskey(benchgroup, "mul")
        addgroup!(benchgroup, "mul")
    end
    subgroup = benchgroup["mul"]
    for kw in expand_kwargs(params)
        benchmark_mul!(subgroup; kw...)
    end
    return nothing
end

# Register a single `mul!` benchmark for scalar type `T`, sector type `I`,
# space dimensions `dims` and optional space parameters `sigmas`.
function benchmark_mul!(bench; sigmas=nothing, T="Float64", I="Trivial", dims)
    scalartype = parse_type(T)
    sectortype = parse_type(I)

    spaces = generate_space.(sectortype, dims, sigmas)
    make() = init_mul_tensors(scalartype, spaces)

    # Fresh tensors per sample, so in-place mutation of C cannot leak between runs.
    bench[T, I, dims, sigmas] = @benchmarkable mul!(C, A, B) setup = ((A, B, C) = $make())

    return nothing
end

if haskey(all_parameters, "mul")
    g = addgroup!(SUITE, "mul")
    foreach(p -> benchmark_mul!(g, p), all_parameters["mul"])
end

# svd!
# ----

# Build a single tensor on V[1] ← V[2] to be decomposed by tsvd!.
function init_svd_tensor(T, V)
    return randn(T, V[1] ← V[2])
end

# Expand one TOML parameter table into individual benchmarks under the
# "svd" subgroup of `benchgroup`.
function benchmark_svd!(benchgroup, params::Dict)
    if !haskey(benchgroup, "svd")
        addgroup!(benchgroup, "svd")
    end
    subgroup = benchgroup["svd"]
    for kw in expand_kwargs(params)
        benchmark_svd!(subgroup; kw...)
    end
    return nothing
end

# Register a single `tsvd!` benchmark for scalar type `T`, sector type `I`,
# space dimensions `dims` and optional space parameters `sigmas`.
function benchmark_svd!(bench; sigmas=nothing, T="Float64", I="Trivial", dims)
    scalartype = parse_type(T)
    sectortype = parse_type(I)
    spaces = generate_space.(sectortype, dims, sigmas)
    make() = init_svd_tensor(scalartype, spaces)
    # tsvd! destroys its input, so a fresh tensor is created per sample.
    bench[T, I, dims, sigmas] = @benchmarkable tsvd!(A) setup = (A = $make())
    return nothing
end

if haskey(all_parameters, "svd")
    g = addgroup!(SUITE, "svd")
    foreach(p -> benchmark_svd!(g, p), all_parameters["svd"])
end

end
21 changes: 21 additions & 0 deletions benchmark/TensorKitBenchmarks/linalg/benchparams.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
[[mul]]
T = ["Float64", "ComplexF64"]
I = "Trivial"
dims = [[2, 2, 2], [8, 8, 8], [32, 32, 32], [64, 64, 64], [128, 128, 128]]

[[mul]]
T = ["Float64", "ComplexF64"]
I = "Z2Irrep"
dims = [[2, 2, 2], [8, 8, 8], [32, 32, 32], [64, 64, 64], [128, 128, 128]]
sigmas = [[0.5, 0.5, 0.5]]

[[svd]]
T = ["Float64", "ComplexF64"]
I = "Trivial"
dims = [[2, 2], [8, 8], [32, 32], [64, 64], [128, 128]]

[[svd]]
T = ["Float64", "ComplexF64"]
I = "Z2Irrep"
dims = [[2, 2], [8, 8], [32, 32], [64, 64], [128, 128]]
sigmas = [[0.5, 0.5]]
Loading
Loading