181 changes: 181 additions & 0 deletions benchmarks/benchmarks.jl
@@ -1,3 +1,11 @@
<<<<<<< HEAD
using Pkg

using Chairmarks: @be
using DynamicPPLBenchmarks: Models, benchmark, model_dimension
using JSON: JSON
using PrettyTables: pretty_table, fmt__printf, EmptyCells, MultiColumn, TextTableFormat
=======
using ADTypes: ADTypes
using Distributions:
Categorical,
@@ -16,6 +24,7 @@ using ForwardDiff: ForwardDiff
using LinearAlgebra: cholesky
using Mooncake: Mooncake
using PrettyTables: pretty_table
>>>>>>> upstream/main
using Printf: @sprintf
using ReverseDiff: ReverseDiff
using StableRNGs: StableRNG
@@ -24,6 +33,29 @@ using StableRNGs: StableRNG
# Models
#

<<<<<<< HEAD
colnames = ["Model", "Dim", "AD Backend", "Linked", "t(eval)/ns", "t(grad)/t(eval)"]
function print_results(results_table; to_json=false)
if to_json
# Print to stdout as JSON
results_array = [
Dict(colnames[i] => results_table[j][i] for i in eachindex(colnames)) for
j in eachindex(results_table)
]
# do not use pretty=true, as GitHub Actions expects no linebreaks
JSON.json(stdout, results_array)
println()
else
# Pretty-print to terminal
table_matrix = hcat(Iterators.map(collect, zip(results_table...))...)
return pretty_table(
table_matrix;
column_labels=colnames,
backend=:text,
formatters=[fmt__printf("%.1f", [6, 7])],
fit_table_in_display_horizontally=false,
fit_table_in_display_vertically=false,
=======
"One scalar assumption, one scalar observation."
@model function simple_assume_observe(obs)
x ~ Normal()
@@ -147,6 +179,7 @@ function model_dimension(model, islinked)
DynamicPPL.InitFromPrior(),
transform_strategy(islinked),
),
>>>>>>> upstream/main
)
length(vi[:])
catch
@@ -202,12 +235,59 @@ function print_results(results)
isempty(results) && return println("No benchmark results obtained.")
rows = map(results) do r
(
<<<<<<< HEAD
"Simple assume observe",
Models.simple_assume_observe(randn(rng)),
:forwarddiff,
false,
),
("Smorgasbord", smorgasbord_instance, :forwarddiff, false),
("Smorgasbord", smorgasbord_instance, :forwarddiff, true),
("Smorgasbord", smorgasbord_instance, :reversediff, true),
("Smorgasbord", smorgasbord_instance, :mooncake, true),
("Smorgasbord", smorgasbord_instance, :enzyme, true),
("Loop univariate 1k", loop_univariate1k, :mooncake, true),
("Multivariate 1k", multivariate1k, :mooncake, true),
("Loop univariate 10k", loop_univariate10k, :mooncake, true),
("Multivariate 10k", multivariate10k, :mooncake, true),
("Dynamic", Models.dynamic(), :mooncake, true),
("Submodel", Models.parent(randn(rng)), :mooncake, true),
("LDA", lda_instance, :reversediff, true),
]

results_table = Tuple{
String,Int,String,Bool,Union{Float64,Missing},Union{Float64,Missing}
}[]

for (model_name, model, adbackend, islinked) in chosen_combinations
@info "Running benchmark for $model_name, $adbackend, $islinked"
relative_eval_time, relative_ad_eval_time = try
results = benchmark(model, adbackend, islinked)
@info " t(eval) = $(results.primal_time)"
@info " t(grad) = $(results.grad_time)"
(results.primal_time * 1e9), (results.grad_time / results.primal_time)
catch e
@info "benchmark errored: $e"
missing, missing
end
push!(
results_table,
(
model_name,
model_dimension(model, islinked),
string(adbackend),
islinked,
relative_eval_time,
relative_ad_eval_time,
),
=======
r.name,
format_dim(r.dim),
r.adbackend,
r.islinked,
format_time(r.t_logd),
format_ratio(r.ratio),
>>>>>>> upstream/main
)
end
matrix = hcat(Iterators.map(collect, zip(rows...))...)
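The `hcat(Iterators.map(collect, zip(rows...))...)` idiom appears in both `print_results` variants and is worth unpacking. A minimal standalone sketch with illustrative data (not taken from this PR):

```julia
# zip(rows...) pairs up the i-th field of every tuple, collect turns each
# pairing into a Vector (one table column), and hcat splats the columns
# into a Matrix — so each input tuple becomes one row for pretty_table.
rows = [("Smorgasbord", 201, "mooncake", true), ("Dynamic", 20, "mooncake", true)]
matrix = hcat(Iterators.map(collect, zip(rows...))...)
# 2×4 Matrix{Any}:
#  "Smorgasbord"  201  "mooncake"  true
#  "Dynamic"       20  "mooncake"  true
```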
@@ -304,11 +384,112 @@ function run(; markdown::Bool=false)
return nothing
end

<<<<<<< HEAD
struct TestCase
model_name::String
dim::Integer
ad_backend::String
linked::Bool
TestCase(d::Dict{String,Any}) = new((d[c] for c in colnames[1:4])...)
end
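Because `TestCase` is an immutable struct of `String`/`Integer`/`Bool` fields, two instances built from equal dictionaries compare equal and hash identically, which is what lets `combine` below use them as `Dict` keys to match head and base results. A hedged sketch, assuming the `colnames` and `TestCase` definitions above:

```julia
d1 = Dict{String,Any}(
    "Model" => "LDA", "Dim" => 10, "AD Backend" => "reversediff", "Linked" => true
)
d2 = copy(d1)
# Field-wise equality of immutable structs makes these interchangeable keys:
TestCase(d1) == TestCase(d2)                     # true
haskey(Dict(TestCase(d1) => 1.0), TestCase(d2))  # true
```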
function combine(head_filename::String, base_filename::String)
head_results = try
JSON.parsefile(head_filename, Vector{Dict{String,Any}})
catch
Dict{String,Any}[]
end
@info "Loaded $(length(head_results)) results from $head_filename"
base_results = try
JSON.parsefile(base_filename, Vector{Dict{String,Any}})
catch
Dict{String,Any}[]
end
@info "Loaded $(length(base_results)) results from $base_filename"
# Identify unique combinations of (Model, Dim, AD Backend, Linked)
head_testcases = Dict(
TestCase(d) => (d[colnames[5]], d[colnames[6]]) for d in head_results
)
base_testcases = Dict(
TestCase(d) => (d[colnames[5]], d[colnames[6]]) for d in base_results
)
all_testcases = union(Set(keys(head_testcases)), Set(keys(base_testcases)))
@info "$(length(all_testcases)) unique test cases found"
sorted_testcases = sort(
collect(all_testcases); by=(c -> (c.model_name, c.linked, c.ad_backend))
)
results_table = Tuple{
String,
Int,
String,
Bool,
String,
String,
String,
String,
String,
String,
String,
String,
String,
}[]
sublabels = ["base", "this PR", "speedup"]
results_colnames = [
[
EmptyCells(4),
MultiColumn(3, "t(eval)/ns"),
MultiColumn(3, "t(grad) / t(eval)"),
MultiColumn(3, "t(grad) / t(ref)"),
],
[colnames[1:4]..., sublabels..., sublabels..., sublabels...],
]
sprint_float(x::Float64) = @sprintf("%.2f", x)
sprint_float(m::Missing) = "err"
for c in sorted_testcases
head_eval, head_grad = get(head_testcases, c, (missing, missing))
base_eval, base_grad = get(base_testcases, c, (missing, missing))
# If the benchmark errored, it will return `missing` in the `run()` function above.
# The issue with this is that JSON serialisation converts it to `null`, and then
# when reading back from JSON, it becomes `nothing` instead of `missing`!
head_eval = head_eval === nothing ? missing : head_eval
head_grad = head_grad === nothing ? missing : head_grad
base_eval = base_eval === nothing ? missing : base_eval
base_grad = base_grad === nothing ? missing : base_grad
# Normalising to `missing` lets the divisions below propagate instead of erroring
speedup_eval = base_eval / head_eval
speedup_grad = base_grad / head_grad
# It also makes this multiplication safe, which recovers t(grad) / t(ref)
head_grad_vs_ref = head_grad * head_eval
base_grad_vs_ref = base_grad * base_eval
speedup_grad_vs_ref = base_grad_vs_ref / head_grad_vs_ref
push!(
results_table,
(
c.model_name,
c.dim,
c.ad_backend,
c.linked,
sprint_float(base_eval),
sprint_float(head_eval),
sprint_float(speedup_eval),
sprint_float(base_grad),
sprint_float(head_grad),
sprint_float(speedup_grad),
sprint_float(base_grad_vs_ref),
sprint_float(head_grad_vs_ref),
sprint_float(speedup_grad_vs_ref),
),
)
end
# Pretty-print to terminal
if isempty(results_table)
println("No benchmark results obtained.")
=======
if abspath(PROGRAM_FILE) == @__FILE__
if ARGS == ["markdown"]
run(; markdown=true)
elseif ARGS == []
run()
>>>>>>> upstream/main
else
error("invalid arguments: $(ARGS)")
end
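To illustrate the `missing`/`nothing` round trip that `combine` guards against, here is a minimal sketch based on the comments in that function (assuming JSON.jl behaves as they describe):

```julia
using JSON: JSON

s = JSON.json([1.0, missing])   # "[1.0,null]" — missing serialises to null
v = JSON.parse(s)               # Any[1.0, nothing] — null parses back as nothing
vals = [x === nothing ? missing : x for x in v]

1.0 / vals[2]                   # missing — propagates through the speedup maths
# 1.0 / nothing would instead throw a MethodError, hence the normalisation.
```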
7 changes: 5 additions & 2 deletions src/abstract_varinfo.jl
@@ -399,8 +399,11 @@ Return an iterator over all `vns` in `vi`.
"""
getindex(vi::AbstractVarInfo, ::Colon)

Return the current value(s) of `vn` (`vns`) in `vi` in the support of its (their)
distribution(s) as a flattened `Vector`.
Return the internal value(s) stored in `vi` as a flattened `Vector`. Note that
these values may be in transformed (linked) space if `vi` has been linked.

For untransformed values, use [`getindex_internal`](@ref) after calling
[`invlink`](@ref) on `vi`.

The default implementation is to call [`internal_values_as_vector`](@ref).
"""