20 changes: 20 additions & 0 deletions .github/workflows/CI.yml
@@ -52,3 +52,23 @@ jobs:
          files: lcov.info
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: true

  ext:
    name: Ext (logdensityproblems, ${{ matrix.version }})
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        version:
          - '1'
          - 'min'
    steps:
      - uses: actions/checkout@v6
      - uses: julia-actions/setup-julia@v2
        with:
          version: ${{ matrix.version }}
      - uses: julia-actions/cache@v3
      - uses: julia-actions/julia-buildpkg@v1
      - run: julia --project=. test/run_extras.jl
        env:
          LABEL: ext/logdensityproblems
7 changes: 6 additions & 1 deletion Project.toml
@@ -3,9 +3,10 @@ uuid = "7a57a42e-76ec-4ea3-a279-07e840d6d9cf"
keywords = ["probablistic programming"]
license = "MIT"
desc = "Common interfaces for probabilistic programming"
version = "0.14.2"
version = "0.14.3"

[deps]
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
AbstractMCMC = "80f14c24-f653-4e6a-9b94-39d6b0f70001"
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
BangBang = "198e06fe-97b7-11e9-32a5-e1d131e6ad66"
@@ -19,11 +20,15 @@ StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"

[weakdeps]
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"

[extensions]
AbstractPPLDistributionsExt = ["Distributions", "LinearAlgebra"]
AbstractPPLLogDensityProblemsExt = ["LogDensityProblems"]

[compat]
ADTypes = "1"
LogDensityProblems = "2"
AbstractMCMC = "2, 3, 4, 5"
Accessors = "0.1"
BangBang = "0.4"
4 changes: 3 additions & 1 deletion docs/Project.toml
@@ -1,8 +1,10 @@
[deps]
ADTypes = "47edcb42-4c32-4615-8424-f2b9edc5f35b"
AbstractPPL = "7a57a42e-76ec-4ea3-a279-07e840d6d9cf"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"

[sources]
AbstractPPL = {path = "../"}
AbstractPPL = {path = ".."}
2 changes: 1 addition & 1 deletion docs/make.jl
@@ -9,7 +9,7 @@ DocMeta.setdocmeta!(AbstractPPL, :DocTestSetup, :(using AbstractPPL); recursive=
makedocs(;
    sitename="AbstractPPL",
    modules=[AbstractPPL, Base.get_extension(AbstractPPL, :AbstractPPLDistributionsExt)],
    pages=["index.md", "varname.md", "pplapi.md", "interface.md"],
    pages=["index.md", "varname.md", "pplapi.md", "evaluators.md", "interface.md"],
    checkdocs=:exports,
    doctest=false,
)
163 changes: 163 additions & 0 deletions docs/src/evaluators.md
@@ -0,0 +1,163 @@
# Evaluator preparation and AD

AbstractPPL provides a small interface for preparing callables and asking a
prepared evaluator for values and derivatives. `prepare` binds a callable to a
sample input that establishes the expected input shape and type;
`value_and_gradient!!` and `value_and_jacobian!!` then return the value and
derivative together.

The `!!` suffix signals that the returned gradient or Jacobian **may alias
internal cache buffers** of the prepared evaluator. The next call to
`value_and_gradient!!` (or `value_and_jacobian!!`) may overwrite that buffer
in place, so a previously returned reference will silently change. Copy
before holding on to a result:

```julia
val, grad = value_and_gradient!!(prepared, x1)
saved = copy(grad) # safe to keep
val2, grad2 = value_and_gradient!!(prepared, x2)
# `grad` may now reflect `x2`; `saved` still reflects `x1`
```

Backends that always allocate fresh output (e.g. `ForwardDiff.gradient`) do
not actually alias, but consumers should not rely on that — write to the
contract, not the implementation.

## Quick start

```@example ad
using AbstractPPL
using AbstractPPL: prepare, value_and_gradient!!
using AbstractPPL.Evaluators: Prepared, VectorEvaluator, NamedTupleEvaluator
using ADTypes: AutoForwardDiff
using ForwardDiff: ForwardDiff

function AbstractPPL.prepare(adtype::AutoForwardDiff, f, x::AbstractVector{<:Real})
    return Prepared(adtype, VectorEvaluator(f, length(x)))
end

function AbstractPPL.value_and_gradient!!(
    p::Prepared{<:AutoForwardDiff}, x::AbstractVector{<:Real}
)
    return (p(x), ForwardDiff.gradient(p.evaluator.f, x))
end

mvnormal_logp(x) = -0.5 * sum(abs2, x) # standard normal log density (up to an additive constant)
prepared = prepare(AutoForwardDiff(), mvnormal_logp, zeros(3))
value_and_gradient!!(prepared, [1.0, 2.0, 3.0])
```

## Two input styles

### Vector inputs

When the callable accepts a flat vector, pass a sample vector whose length
matches the expected input:

```@example ad
prepared([1.0, 2.0, 3.0])
```

For vector-valued callables, use `value_and_jacobian!!`. The returned Jacobian
has shape `(length(value), length(x))`. The backend extension that defines
`value_and_gradient!!` typically also defines `value_and_jacobian!!` on the
same `Prepared` type; because they are separate generic functions, the two
methods coexist without conflict, and the caller picks whichever applies to
their function:

```@example ad
using AbstractPPL: value_and_jacobian!!

function AbstractPPL.value_and_jacobian!!(
    p::Prepared{<:AutoForwardDiff}, x::AbstractVector{<:Real}
)
    return (p(x), ForwardDiff.jacobian(p.evaluator.f, x))
end

vecfun(x) = [x[1] * x[2], x[2] + x[3]]
prepared_vec = prepare(AutoForwardDiff(), vecfun, zeros(3))
value_and_jacobian!!(prepared_vec, [2.0, 3.0, 4.0])
```
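
As with gradients, the `!!` contract applies: copy a Jacobian before keeping it
across calls. A quick check of the shape contract stated above:

```@example ad
val, J = value_and_jacobian!!(prepared_vec, [2.0, 3.0, 4.0])
size(J) == (length(val), 3)
```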

### NamedTuple inputs

When the callable accepts a `NamedTuple`, pass a sample `NamedTuple` whose
field names and value types match the expected input. The prototype's leaves
must be `Real`, `Complex`, `AbstractArray` (recursively), `Tuple`, or
`NamedTuple`. An extension can define a `prepare` overload that wraps the
function in a `NamedTupleEvaluator`:

```@example ad
function AbstractPPL.prepare(adtype::AutoForwardDiff, f, values::NamedTuple)
    return Prepared(adtype, NamedTupleEvaluator(f, values))
end

ntfun(v::NamedTuple) = v.a^2 + sum(abs2, v.b)
prepared_nt = prepare(AutoForwardDiff(), ntfun, (a=0.0, b=zeros(2)))
prepared_nt((a=1.0, b=[2.0, 3.0]))
```

## AD backends

Automatic differentiation packages extend the interface by implementing
`value_and_gradient!!` and `value_and_jacobian!!` for specific cache types
stored in `prepared.cache`:

```julia
prepared = prepare(adtype, problem, prototype) # returns Prepared{AD,E,Cache}
value_and_gradient!!(prepared, x) # may return aliased cache buffer
value_and_jacobian!!(prepared, x)
```

`Prepared` has three fields: `adtype`, `evaluator` (the user-facing callable),
and `cache` (backend-specific pre-allocated state such as ForwardDiff configs or
Mooncake tapes). Backend extensions dispatch on the cache type:

```julia
function AbstractPPL.prepare(
    adtype::MyADType, problem, x::AbstractVector{<:Real}; check_dims::Bool=true
)
    f = ... # extract callable from problem
    cache = MyCache(f, x)
    return Prepared(adtype, VectorEvaluator{check_dims}(f, length(x)), cache)
end

function AbstractPPL.value_and_gradient!!(
    p::Prepared{<:AbstractADType,<:VectorEvaluator,<:MyCache}, x::AbstractVector{<:Real}
)
    # use p.cache to avoid allocations
    return ...
end
```

Pass `check_dims=false` in your `prepare` implementation to construct a
`VectorEvaluator{false}`, which skips the per-call length check. This is an
opt-in trust mode — the caller takes responsibility for `length(x)`. The
typical use is inside a backend's `value_and_gradient!!`, where the AD
library invokes the inner callable many times with same-length dual arrays
derived from a single user-supplied `x`; re-validating on each invocation
would be redundant work in the hot path.
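
For illustration, a trusted evaluator can also be constructed directly. This is
a sketch reusing `mvnormal_logp` from the quick start; in real backend code the
`VectorEvaluator{false}` is built inside `prepare`, not by the end user:

```julia
trusted = VectorEvaluator{false}(mvnormal_logp, 3)
trusted([1.0, 2.0, 3.0]) # forwards to `mvnormal_logp` without checking the length
```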

## Without an AD backend

The two-argument form `prepare(problem, x)` is available without any AD
package. By default it wraps `problem` in a `VectorEvaluator{check_dims}`
(or `NamedTupleEvaluator{check_dims}` for the `NamedTuple` form), giving you
a callable that runs the per-call shape check before forwarding to
`problem`. Downstream code that only needs primal evaluation (e.g.
log-density only, no gradient) can call `prepare(...)` uniformly without
knowing whether an AD backend is loaded:

```@example ad
sumsimple(x) = sum(x)
p = prepare(sumsimple, zeros(3)) # `VectorEvaluator{true}(sumsimple, 3)`
p([1.0, 2.0, 3.0])
```
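
The `NamedTuple` form works the same way, reusing `ntfun` from above:

```@example ad
p_nt = prepare(ntfun, (a=0.0, b=zeros(2))) # wraps in a `NamedTupleEvaluator{true}`
p_nt((a=1.0, b=[2.0, 3.0]))
```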

## API reference

```@docs
AbstractPPL.prepare
AbstractPPL.value_and_gradient!!
AbstractPPL.value_and_jacobian!!
```
4 changes: 4 additions & 0 deletions docs/src/pplapi.md
@@ -18,3 +18,7 @@ evaluate!!
```@docs
AbstractModelTrace
```

## Evaluators interface

See [Evaluator preparation and AD](@ref) for a full guide and API reference.
42 changes: 42 additions & 0 deletions ext/AbstractPPLLogDensityProblemsExt.jl
@@ -0,0 +1,42 @@
module AbstractPPLLogDensityProblemsExt

using AbstractPPL: AbstractPPL
using AbstractPPL.Evaluators: Prepared, VectorEvaluator
using LogDensityProblems: LogDensityProblems

# LDP integration is restricted to vector-input evaluators; `NamedTupleEvaluator`
# does not satisfy LDP's vector-input contract. Scalar output is a runtime
# contract the user must satisfy.

LogDensityProblems.logdensity(p::Prepared{<:Any,<:VectorEvaluator}, x) = p(x)
LogDensityProblems.logdensity(e::VectorEvaluator, x) = e(x)

function LogDensityProblems.dimension(p::Prepared{<:Any,<:VectorEvaluator})
    return LogDensityProblems.dimension(p.evaluator)
end
LogDensityProblems.dimension(e::VectorEvaluator) = e.dim

# Generic fallback: order 0. AD-backend extensions (DifferentiationInterface,
# ForwardDiff, Mooncake, etc.) must overload this for their cache type to
# advertise `LogDensityOrder{1}` — without that overload,
# `logdensity_and_gradient` will hit the `value_and_gradient!!` stub and fail.
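# A hypothetical backend extension with cache type `MyCache` would then
# advertise first-order capability like:
#
#     function LogDensityProblems.capabilities(
#         ::Type{<:Prepared{<:Any,<:VectorEvaluator,<:MyCache}}
#     )
#         return LogDensityProblems.LogDensityOrder{1}()
#     end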
function LogDensityProblems.capabilities(::Type{<:Prepared{<:Any,<:VectorEvaluator}})
    return LogDensityProblems.LogDensityOrder{0}()
end
function LogDensityProblems.capabilities(p::Prepared{<:Any,<:VectorEvaluator})
    return LogDensityProblems.capabilities(typeof(p))
end

function LogDensityProblems.capabilities(::Type{<:VectorEvaluator})
    return LogDensityProblems.LogDensityOrder{0}()
end
function LogDensityProblems.capabilities(e::VectorEvaluator)
    return LogDensityProblems.capabilities(typeof(e))
end

function LogDensityProblems.logdensity_and_gradient(p::Prepared{<:Any,<:VectorEvaluator}, x)
    val, grad = AbstractPPL.value_and_gradient!!(p, x)
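    # `grad` may alias the prepared evaluator's cache buffer; copy so the
    # caller owns the result, as LogDensityProblems expects.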
    return (val, copy(grad))
end

end # module
6 changes: 6 additions & 0 deletions src/AbstractPPL.jl
@@ -10,6 +10,12 @@ export AbstractModelTrace
include("abstractmodeltrace.jl")
include("abstractprobprog.jl")
include("evaluate.jl")
include("evaluators/Evaluators.jl")
using .Evaluators: prepare, value_and_gradient!!, value_and_jacobian!!
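# `public` is new syntax in Julia 1.11; going through `Meta.parse` avoids a
# parse error on older versions, where the declaration is simply skipped.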
@static if VERSION >= v"1.11.0"
    eval(Meta.parse("public prepare, value_and_gradient!!, value_and_jacobian!!"))
end

include("varname/optic.jl")
include("varname/varname.jl")
include("varname/subsumes.jl")
2 changes: 1 addition & 1 deletion src/evaluate.jl
@@ -5,7 +5,7 @@ Common base type for evaluation contexts.
"""
abstract type AbstractContext end

"""
"""
    evaluate!!

General API for model operations, e.g. prior evaluation, log density, log joint etc.