diff --git a/docs/src/api.md b/docs/src/api.md
index a588929a..1a39d5ba 100644
--- a/docs/src/api.md
+++ b/docs/src/api.md
@@ -9,7 +9,16 @@ This modularity means that different HMC variants can be easily constructed by c
 - Diagonal metric: `DiagEuclideanMetric(dim)`
 - Dense metric: `DenseEuclideanMetric(dim)`
 
-where `dim` is the dimensionality of the sampling space.
+where `dim` is the dimension of the sampling space.
+
+Furthermore, there is now an experimental dense Riemannian metric implementation, specifiable as `DenseRiemannianMetric(dim, premetric, premetric_sensitivities, metric_map=IdentityMap())`, with
+
+ - `dim`: again the dimension of the sampling space,
+ - `premetric`: a function which, for a given posterior position `pos`, computes either
+   a) a symmetric, **positive definite** matrix acting as the position-dependent Riemannian metric (if `metric_map = IdentityMap()`), or
+   b) a symmetric, **not necessarily positive definite** matrix acting as the position-dependent Riemannian metric after being passed through the `metric_map` argument, which then has to ensure that its return value *is* positive definite (as `metric_map = SoftAbsMap(alpha)` does),
+ - `premetric_sensitivities`: a function which, again for a given posterior position `pos`, computes the sensitivities of the **`premetric`** function with respect to this position,
+ - `metric_map=IdentityMap()`: a function which takes `premetric(pos)` and returns a symmetric positive definite matrix. Provided options are `IdentityMap()` and `SoftAbsMap(alpha)`; the `SoftAbsMap` type allows one to work directly with a `premetric` that returns the Hessian of the log density function, which in general is not guaranteed to be positive definite.
 
 ### [Integrator (`integrator`)](@id integrator)
 
diff --git a/src/AdvancedHMC.jl b/src/AdvancedHMC.jl
index 5db7644c..4aa3f145 100644
--- a/src/AdvancedHMC.jl
+++ b/src/AdvancedHMC.jl
@@ -2,7 +2,19 @@ module AdvancedHMC
 
 using Statistics: mean, var, middle
 using LinearAlgebra:
-    Symmetric, UpperTriangular, mul!, ldiv!, dot, I, diag, cholesky, UniformScaling
+    Symmetric,
+    UpperTriangular,
+    mul!,
+    ldiv!,
+    dot,
+    I,
+    diag,
+    Diagonal,
+    cholesky,
+    UniformScaling,
+    logdet,
+    tr,
+    eigen,
+    diagm
 using StatsFuns: logaddexp, logsumexp, loghalf
 using Random: Random, AbstractRNG
 using ProgressMeter: ProgressMeter
@@ -40,7 +52,7 @@ struct GaussianKinetic <: AbstractKinetic end
 export GaussianKinetic
 
 include("metric.jl")
-export UnitEuclideanMetric, DiagEuclideanMetric, DenseEuclideanMetric
+export UnitEuclideanMetric, DiagEuclideanMetric, DenseEuclideanMetric, DenseRiemannianMetric
 
 include("hamiltonian.jl")
 export Hamiltonian
@@ -50,6 +62,11 @@ export Leapfrog, JitteredLeapfrog, TemperedLeapfrog
 include("riemannian/integrator.jl")
 export GeneralizedLeapfrog
 
+include("riemannian/metric.jl")
+export IdentityMap, SoftAbsMap, DenseRiemannianMetric
+
+include("riemannian/hamiltonian.jl")
+
 include("trajectory.jl")
 export Trajectory,
     HMCKernel,
diff --git a/src/riemannian/hamiltonian.jl b/src/riemannian/hamiltonian.jl
index 6f051ffb..11266b5b 100644
--- a/src/riemannian/hamiltonian.jl
+++ b/src/riemannian/hamiltonian.jl
@@ -1,229 +1,114 @@
-using Random
-
-### integrator.jl
-
-import AdvancedHMC: ∂H∂θ, ∂H∂r, DualValue, PhasePoint, phasepoint, step
-using AdvancedHMC: TYPEDEF, TYPEDFIELDS, AbstractScalarOrVec, AbstractLeapfrog, step_size
+#! Eq (14) of Girolami & Calderhead (2011)
+"The gradient of the Hamiltonian with respect to the momentum."
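+# NOTE With a Gaussian kinetic energy on a position-dependent metric G(θ), the
+# Hamiltonian is H(θ, r) = -ℓπ(θ) + ½ log((2π)^D det G(θ)) + ½ rᵀ G(θ)⁻¹ r
+# (cf. `neg_energy` below), so ∂H/∂r = G(θ)⁻¹ r; the `\` solve avoids forming
+# an explicit inverse.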
+function ∂H∂r(
+    h::Hamiltonian{<:DenseRiemannianMetric,<:GaussianKinetic},
+    θ::AbstractVector,
+    r::AbstractVector,
+)
+    H = h.metric.G(θ)
+    G = h.metric.map(H)
+    return G \ r
+end
 
 """
-$(TYPEDEF)
+Computes `tr(A*B)` for square n x n matrices `A` and `B` in O(n^2) without computing `A*B`, which would be O(n^3).
 
-Generalized leapfrog integrator with fixed step size `ϵ`.
-
-# Fields
-
-$(TYPEDFIELDS)
+Does not check that `A` and `B` actually are both n x n matrices.
 """
-struct GeneralizedLeapfrog{T<:AbstractScalarOrVec{<:AbstractFloat}} <: AbstractLeapfrog{T}
-    "Step size."
-    ϵ::T
-    n::Int
-end
-function Base.show(io::IO, l::GeneralizedLeapfrog)
-    return print(io, "GeneralizedLeapfrog(ϵ=", round.(l.ϵ; sigdigits=3), ", n=", l.n, ")")
-end
+tr_product(A::AbstractMatrix, B::AbstractMatrix) = sum(Base.broadcasted(*, A', B))
 
-# Fallback to ignore return_cache & cache kwargs for other ∂H∂θ
-function ∂H∂θ_cache(h, θ, r; return_cache=false, cache=nothing) where {T}
-    dv = ∂H∂θ(h, θ, r)
-    return return_cache ? (dv, nothing) : dv
-end
+"Computes `tr(A*v*v')`, i.e. `dot(v, A, v)`."
+tr_product(A::AbstractMatrix, v::AbstractVector) = sum(Base.broadcasted(*, v, A, v'))
 
-# TODO Make sure vectorization works
-# TODO Check if tempering is valid
-function step(
-    lf::GeneralizedLeapfrog{T},
-    h::Hamiltonian,
-    z::P,
-    n_steps::Int=1;
-    fwd::Bool=n_steps > 0, # simulate hamiltonian backward when n_steps < 0
-    full_trajectory::Val{FullTraj}=Val(false),
-) where {T<:AbstractScalarOrVec{<:AbstractFloat},P<:PhasePoint,FullTraj}
-    n_steps = abs(n_steps)  # to support `n_steps < 0` cases
-
-    ϵ = fwd ? step_size(lf) : -step_size(lf)
-    ϵ = ϵ'
-
-    res = if FullTraj
-        Vector{P}(undef, n_steps)
-    else
-        z
-    end
+function ∂H∂θ(
+    h::Hamiltonian{<:AbstractRiemannianMetric,<:GaussianKinetic},
+    θ::AbstractVector,
+    r::AbstractVector,
+)
+    return first(∂H∂θ_cache(h, θ, r))
+end
 
-    for i in 1:n_steps
-        θ_init, r_init = z.θ, z.r
-        # Tempering
-        #r = temper(lf, r, (i=i, is_half=true), n_steps)
-        #! Eq (16) of Girolami & Calderhead (2011)
-        r_half = copy(r_init)
-        local cache
-        for j in 1:(lf.n)
-            # Reuse cache for the first iteration
-            if j == 1
-                (; value, gradient) = z.ℓπ
-            elseif j == 2 # cache intermediate values that depends on θ only (which are unchanged)
-                retval, cache = ∂H∂θ_cache(h, θ_init, r_half; return_cache=true)
-                (; value, gradient) = retval
-            else # reuse cache
-                (; value, gradient) = ∂H∂θ_cache(h, θ_init, r_half; cache=cache)
-            end
-            r_half = r_init - ϵ / 2 * gradient
-            # println("r_half: ", r_half)
-        end
+"""
+Computes `∂H∂θ` for a `DenseRiemannianMetric` with the `IdentityMap`, returning a
+tuple of the `DualValue` and a cache of the θ-only intermediates. The cache can be
+passed back in via the `cache` keyword to skip recomputation while θ is unchanged.
+"""
+@views function ∂H∂θ_cache(
+    h::Hamiltonian{<:DenseRiemannianMetric{T,<:IdentityMap},<:GaussianKinetic},
+    θ::AbstractVector{T},
+    r::AbstractVector{T};
+    cache=nothing,
+) where {T}
+    cache = @something cache begin
+        log_density, log_density_gradient = h.∂ℓπ∂θ(θ)
+        # h.metric.map is the IdentityMap
+        metric = h.metric.G(θ)
+        # The metric is inverted once so that `tr_product(inv_metric, ...)` stays cheap,
+        # but an explicit inverse can be numerically unstable for ill-conditioned metrics.
+        inv_metric = inv(metric)
+        metric_sensitivities = h.metric.∂G∂θ(θ)
+        rv1 = map(eachindex(log_density_gradient)) do i
+            -log_density_gradient[i] +
+            0.5 * tr_product(inv_metric, metric_sensitivities[:, :, i])
+        end
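+        # NOTE `rv1` holds the momentum-independent part of Eq (15),
+        # -∂ℓπ/∂θᵢ + ½ tr(G⁻¹ ∂G/∂θᵢ); it depends on θ only and is therefore
+        # cached, while the r-dependent quadratic term is recomputed for every
+        # new momentum further below.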
-        #! Eq (17) of Girolami & Calderhead (2011)
-        θ_full = copy(θ_init)
-        term_1 = ∂H∂r(h, θ_init, r_half) # unchanged across the loop
-        for j in 1:(lf.n)
-            θ_full = θ_init + ϵ / 2 * (term_1 + ∂H∂r(h, θ_full, r_half))
-            # println("θ_full :", θ_full)
-        end
-        #! Eq (18) of Girolami & Calderhead (2011)
-        (; value, gradient) = ∂H∂θ(h, θ_full, r_half)
-        r_full = r_half - ϵ / 2 * gradient
-        # println("r_full: ", r_full)
-        # Tempering
-        #r = temper(lf, r, (i=i, is_half=false), n_steps)
-        # Create a new phase point by caching the logdensity and gradient
-        z = phasepoint(h, θ_full, r_full; ℓπ=DualValue(value, gradient))
-        # Update result
-        if FullTraj
-            res[i] = z
-        else
-            res = z
-        end
-        if !isfinite(z)
-            # Remove undef
-            if FullTraj
-                res = res[isassigned.(Ref(res), 1:n_steps)]
-            end
-            break
-        end
-        # @assert false
-    end
-    return res
-end
+        (; log_density, inv_metric, metric_sensitivities, rv1)
+    end
+    inv_metric_r = cache.inv_metric * r
+    return DualValue(
+        cache.log_density,
+        #! Eq (15) of Girolami & Calderhead (2011)
+        cache.rv1 .- Base.broadcasted(eachindex(cache.rv1)) do i
+            0.5 * tr_product(cache.metric_sensitivities[:, :, i], inv_metric_r)
+        end,
+    ), cache
+end
 
-# TODO Make the order of θ and r consistent with neg_energy
-∂H∂θ(h::Hamiltonian, θ::AbstractVecOrMat, r::AbstractVecOrMat) = ∂H∂θ(h, θ)
-∂H∂r(h::Hamiltonian, θ::AbstractVecOrMat, r::AbstractVecOrMat) = ∂H∂r(h, r)
-
-### hamiltonian.jl
-
-import AdvancedHMC: refresh, phasepoint
-using AdvancedHMC: FullMomentumRefreshment, PartialMomentumRefreshment, AbstractMetric
-
-# To change L180 of hamiltonian.jl
-function phasepoint(
-    rng::Union{AbstractRNG,AbstractVector{<:AbstractRNG}},
-    θ::AbstractVecOrMat{T},
-    h::Hamiltonian,
-) where {T<:Real}
-    return phasepoint(h, θ, rand_momentum(rng, h.metric, h.kinetic, θ))
-end
-
-# To change L191 of hamiltonian.jl
-function refresh(
-    rng::Union{AbstractRNG,AbstractVector{<:AbstractRNG}},
-    ::FullMomentumRefreshment,
-    h::Hamiltonian,
-    z::PhasePoint,
-)
-    return phasepoint(h, z.θ, rand_momentum(rng, h.metric, h.kinetic, z.θ))
-end
-
-# To change L215 of hamiltonian.jl
-function refresh(
-    rng::Union{AbstractRNG,AbstractVector{<:AbstractRNG}},
-    ref::PartialMomentumRefreshment,
-    h::Hamiltonian,
-    z::PhasePoint,
-)
-    return phasepoint(
-        h,
-        z.θ,
-        ref.α * z.r + sqrt(1 - ref.α^2) * rand_momentum(rng, h.metric, h.kinetic, z.θ),
-    )
-end
-
-### metric.jl
-
-import AdvancedHMC: _rand
-using AdvancedHMC: AbstractMetric
-using LinearAlgebra: eigen, cholesky, Symmetric
-
-abstract type AbstractRiemannianMetric <: AbstractMetric end
-
-abstract type AbstractHessianMap end
-
-struct IdentityMap <: AbstractHessianMap end
-
-(::IdentityMap)(x) = x
-
-struct SoftAbsMap{T} <: AbstractHessianMap
-    α::T
-end
+
+#! J as defined in the middle of the right column of Page 3 of Betancourt (2012)
+function make_J(λ::AbstractVector{T}, α::T) where {T<:AbstractFloat}
+    d = length(λ)
+    J = Matrix{T}(undef, d, d)
+    for i in 1:d, j in 1:d
+        J[i, j] = if (λ[i] == λ[j])
+            # Ref: https://www.wolframalpha.com/input?i=derivative+of+x+*+coth%28a+*+x%29
+            #! Based on the middle of the right column of Page 3 of Betancourt (2012):
+            #! "Note that when λᵢ = λⱼ, such as for the diagonal elements or
+            #! degenerate eigenvalues, this becomes the derivative"
+            coth(α * λ[i]) + λ[i] * α * -csch(λ[i] * α)^2
+        else
+            ((λ[i] * coth(α * λ[i]) - λ[j] * coth(α * λ[j])) / (λ[i] - λ[j]))
+        end
+    end
+    return J
+end
-
-# TODO Register softabs with ReverseDiff
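+# NOTE `J[i, j]` is the divided difference
+#   (softabs(λ[i]) - softabs(λ[j])) / (λ[i] - λ[j]),
+# with the λ[i] == λ[j] branch taking the limiting derivative d/dλ λ*coth(αλ);
+# this is the usual chain-rule factor for functions applied to a matrix via its
+# eigendecomposition.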
-#! The definition of SoftAbs from Page 3 of Betancourt (2012)
-function softabs(X, α=20.0)
-    F = eigen(X) # ReverseDiff cannot diff through `eigen`
-    Q = hcat(F.vectors)
-    λ = F.values
-    softabsλ = λ .* coth.(α * λ)
-    return Q * diagm(softabsλ) * Q', Q, λ, softabsλ
-end
-
-(map::SoftAbsMap)(x) = softabs(x, map.α)[1]
-
-struct DenseRiemannianMetric{
-    T,
-    TM<:AbstractHessianMap,
-    A<:Union{Tuple{Int},Tuple{Int,Int}},
-    AV<:AbstractVecOrMat{T},
-    TG,
-    T∂G∂θ,
-} <: AbstractRiemannianMetric
-    size::A
-    G::TG # TODO store G⁻¹ here instead
-    ∂G∂θ::T∂G∂θ
-    map::TM
-    _temp::AV
-end
-
-# TODO Make dense mass matrix support matrix-mode parallel
-function DenseRiemannianMetric(size, G, ∂G∂θ, map=IdentityMap()) where {T<:AbstractFloat}
-    _temp = Vector{Float64}(undef, size[1])
-    return DenseRiemannianMetric(size, G, ∂G∂θ, map, _temp)
-end
-# DenseEuclideanMetric(::Type{T}, D::Int) where {T} = DenseEuclideanMetric(Matrix{T}(I, D, D))
-# DenseEuclideanMetric(D::Int) = DenseEuclideanMetric(Float64, D)
-# DenseEuclideanMetric(::Type{T}, sz::Tuple{Int}) where {T} = DenseEuclideanMetric(Matrix{T}(I, first(sz), first(sz)))
-# DenseEuclideanMetric(sz::Tuple{Int}) = DenseEuclideanMetric(Float64, sz)
-
-# renew(ue::DenseEuclideanMetric, M⁻¹) = DenseEuclideanMetric(M⁻¹)
-
-Base.size(e::DenseRiemannianMetric) = e.size
-Base.size(e::DenseRiemannianMetric, dim::Int) = e.size[dim]
-Base.show(io::IO, dem::DenseRiemannianMetric) = print(io, "DenseRiemannianMetric(...)")
-
-function rand_momentum(
-    rng::Union{AbstractRNG,AbstractVector{<:AbstractRNG}},
-    metric::DenseRiemannianMetric{T},
-    kinetic,
-    θ::AbstractVecOrMat,
-) where {T}
-    r = _randn(rng, T, size(metric)...)
-    G⁻¹ = inv(metric.map(metric.G(θ)))
-    chol = cholesky(Symmetric(G⁻¹))
-    ldiv!(chol.U, r)
-    return r
-end
+
+@views function ∂H∂θ_cache(
+    h::Hamiltonian{<:DenseRiemannianMetric{T,<:SoftAbsMap},<:GaussianKinetic},
+    θ::AbstractVector{T},
+    r::AbstractVector{T};
+    cache=nothing,
+) where {T}
+    cache = @something cache begin
+        log_density, log_density_gradient = h.∂ℓπ∂θ(θ)
+        premetric = h.metric.G(θ)
+        premetric_sensitivities = h.metric.∂G∂θ(θ)
+        metric, Q, λ, softabsλ = softabs(premetric, h.metric.map.α)
+        J = make_J(λ, h.metric.map.α)
+
+        #! Based on the two equations from the right column of Page 3 of Betancourt (2012)
+        tmpv = diag(J) ./ softabsλ
+        tmpm = Q * Diagonal(tmpv) * Q'
+
+        rv1 = map(eachindex(log_density_gradient)) do i
+            -log_density_gradient[i] +
+            0.5 * tr_product(tmpm, premetric_sensitivities[:, :, i])
+        end
+        # `J` and `premetric_sensitivities` must be part of the cache: they are
+        # reused on every cached call below.
+        (; log_density, Q, J, softabsλ, premetric_sensitivities, tmpv, tmpm, rv1)
+    end
+    cache.tmpv .= (cache.Q' * r) ./ cache.softabsλ
+    cache.tmpm .= cache.Q * (cache.J .* cache.tmpv .* cache.tmpv') * cache.Q'
+
+    return DualValue(
+        cache.log_density,
+        cache.rv1 .- Base.broadcasted(eachindex(cache.rv1)) do i
+            0.5 * tr_product(cache.tmpm, cache.premetric_sensitivities[:, :, i])
+        end,
+    ), cache
+end
-
-### hamiltonian.jl
-
-import AdvancedHMC: phasepoint, neg_energy, ∂H∂θ, ∂H∂r
-using LinearAlgebra: logabsdet, tr
-
 # QUES Do we want to change everything to position dependent by default?
 # Add θ to ∂H∂r for DenseRiemannianMetric
 function phasepoint(
@@ -236,10 +121,9 @@ function phasepoint(
     return PhasePoint(θ, r, ℓπ, ℓκ)
 end
 
-# Negative kinetic energy
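+# NOTE Under the Riemannian metric the momenta are Gaussian conditional on the
+# position, r | θ ~ N(0, G(θ)), so the kinetic energy includes the θ-dependent
+# normalizing constant ½ log((2π)^D det G(θ)), computed as `logZ` below.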
 #! Eq (13) of Girolami & Calderhead (2011)
 function neg_energy(
-    h::Hamiltonian{<:DenseRiemannianMetric}, r::T, θ::T
+    h::Hamiltonian{<:DenseRiemannianMetric,<:GaussianKinetic}, r::T, θ::T
 ) where {T<:AbstractVecOrMat}
     G = h.metric.map(h.metric.G(θ))
     D = size(G, 1)
@@ -248,111 +132,3 @@ function neg_energy(
     mul!(h.metric._temp, inv(G), r)
     return -logZ - dot(r, h.metric._temp) / 2
 end
-
-# QUES L31 of hamiltonian.jl now reads a bit weird (semantically)
-function ∂H∂θ(
-    h::Hamiltonian{<:DenseRiemannianMetric{T,<:IdentityMap}},
-    θ::AbstractVecOrMat{T},
-    r::AbstractVecOrMat{T},
-) where {T}
-    ℓπ, ∂ℓπ∂θ = h.∂ℓπ∂θ(θ)
-    G = h.metric.map(h.metric.G(θ))
-    invG = inv(G)
-    ∂G∂θ = h.metric.∂G∂θ(θ)
-    d = length(∂ℓπ∂θ)
-    return DualValue(
-        ℓπ,
-        #! Eq (15) of Girolami & Calderhead (2011)
-        -mapreduce(vcat, 1:d) do i
-            ∂G∂θᵢ = ∂G∂θ[:, :, i]
-            ∂ℓπ∂θ[i] - 1 / 2 * tr(invG * ∂G∂θᵢ) + 1 / 2 * r' * invG * ∂G∂θᵢ * invG * r
-            # Gr = G \ r
-            # ∂ℓπ∂θ[i] - 1 / 2 * tr(G \ ∂G∂θᵢ) + 1 / 2 * Gr' * ∂G∂θᵢ * Gr
-            # 1 / 2 * tr(invG * ∂G∂θᵢ)
-            # 1 / 2 * r' * invG * ∂G∂θᵢ * invG * r
-        end,
-    )
-end
-
-# Ref: https://www.wolframalpha.com/input?i=derivative+of+x+*+coth%28a+*+x%29
-#! Based on middle of the right column of Page 3 of Betancourt (2012) "Note that whenλi=λj, such as for the diagonal elementsor degenerate eigenvalues, this becomes the derivative"
-dsoftabsdλ(α, λ) = coth(α * λ) + λ * α * -csch(λ * α)^2
-
-#! J as defined in middle of the right column of Page 3 of Betancourt (2012)
-function make_J(λ::AbstractVector{T}, α::T) where {T<:AbstractFloat}
-    d = length(λ)
-    J = Matrix{T}(undef, d, d)
-    for i in 1:d, j in 1:d
-        J[i, j] = if (λ[i] == λ[j])
-            dsoftabsdλ(α, λ[i])
-        else
-            ((λ[i] * coth(α * λ[i]) - λ[j] * coth(α * λ[j])) / (λ[i] - λ[j]))
-        end
-    end
-    return J
-end
-
-function ∂H∂θ(
-    h::Hamiltonian{<:DenseRiemannianMetric{T,<:SoftAbsMap}},
-    θ::AbstractVecOrMat{T},
-    r::AbstractVecOrMat{T},
-) where {T}
-    return ∂H∂θ_cache(h, θ, r)
-end
-function ∂H∂θ_cache(
-    h::Hamiltonian{<:DenseRiemannianMetric{T,<:SoftAbsMap}},
-    θ::AbstractVecOrMat{T},
-    r::AbstractVecOrMat{T};
-    return_cache=false,
-    cache=nothing,
-) where {T}
-    # Terms that only dependent on θ can be cached in θ-unchanged loops
-    if isnothing(cache)
-        ℓπ, ∂ℓπ∂θ = h.∂ℓπ∂θ(θ)
-        H = h.metric.G(θ)
-        ∂H∂θ = h.metric.∂G∂θ(θ)
-
-        G, Q, λ, softabsλ = softabs(H, h.metric.map.α)
-
-        R = diagm(1 ./ softabsλ)
-
-        # softabsΛ = diagm(softabsλ)
-        # M = inv(softabsΛ) * Q' * r
-        # M = R * Q' * r # equiv to above but avoid inv
-
-        J = make_J(λ, h.metric.map.α)
-
-        #! Based on the two equations from the right column of Page 3 of Betancourt (2012)
-        term_1_cached = Q * (R .* J) * Q'
-    else
-        ℓπ, ∂ℓπ∂θ, ∂H∂θ, Q, softabsλ, J, term_1_cached = cache
-    end
-    d = length(∂ℓπ∂θ)
-    D = diagm((Q' * r) ./ softabsλ)
-    term_2_cached = Q * D * J * D * Q'
-    g =
-        -mapreduce(vcat, 1:d) do i
-            ∂H∂θᵢ = ∂H∂θ[:, :, i]
-            # ∂ℓπ∂θ[i] - 1 / 2 * tr(term_1_cached * ∂H∂θᵢ) + 1 / 2 * M' * (J .* (Q' * ∂H∂θᵢ * Q)) * M # (v1)
-            # NOTE Some further optimization can be done here: cache the 1st product all together
-            ∂ℓπ∂θ[i] - 1 / 2 * tr(term_1_cached * ∂H∂θᵢ) + 1 / 2 * tr(term_2_cached * ∂H∂θᵢ) # (v2) cache friendly
-        end
-
-    dv = DualValue(ℓπ, g)
-    return return_cache ? (dv, (; ℓπ, ∂ℓπ∂θ, ∂H∂θ, Q, softabsλ, J, term_1_cached)) : dv
-end
-
-#! Eq (14) of Girolami & Calderhead (2011)
-function ∂H∂r(
-    h::Hamiltonian{<:DenseRiemannianMetric}, θ::AbstractVecOrMat, r::AbstractVecOrMat
-)
-    H = h.metric.G(θ)
-    # if !all(isfinite, H)
-    #     println("θ: ", θ)
-    #     println("H: ", H)
-    # end
-    G = h.metric.map(H)
-    # return inv(G) * r
-    # println("G \ r: ", G \ r)
-    return G \ r # NOTE it's actually pretty weird that ∂H∂θ returns DualValue but ∂H∂r doesn't
-end
diff --git a/src/riemannian/metric.jl b/src/riemannian/metric.jl
new file mode 100644
index 00000000..41d11127
--- /dev/null
+++ b/src/riemannian/metric.jl
@@ -0,0 +1,63 @@
+abstract type AbstractRiemannianMetric <: AbstractMetric end
+
+abstract type AbstractHessianMap end
+
+struct IdentityMap <: AbstractHessianMap end
+
+(::IdentityMap)(x) = x
+
+struct SoftAbsMap{T} <: AbstractHessianMap
+    α::T
+end
+
+# TODO Register softabs with ReverseDiff
+#! The definition of SoftAbs from Page 3 of Betancourt (2012)
+function softabs(X, α=20.0)
+    F = eigen(X) # ReverseDiff cannot diff through `eigen`
+    Q = hcat(F.vectors)
+    λ = F.values
+    softabsλ = λ .* coth.(α * λ)
+    return Q * diagm(softabsλ) * Q', Q, λ, softabsλ
+end
+
+(map::SoftAbsMap)(x) = softabs(x, map.α)[1]
+
+struct DenseRiemannianMetric{
+    T,
+    TM<:AbstractHessianMap,
+    A<:Union{Tuple{Int},Tuple{Int,Int}},
+    AV<:AbstractVecOrMat{T},
+    TG,
+    T∂G∂θ,
+} <: AbstractRiemannianMetric
+    size::A
+    G::TG # TODO store G⁻¹ here instead
+    ∂G∂θ::T∂G∂θ
+    map::TM
+    _temp::AV
+end
+
+# TODO Make dense mass matrix support matrix-mode parallel
+function DenseRiemannianMetric(size, G, ∂G∂θ, map=IdentityMap())
+    _temp = Vector{Float64}(undef, first(size))
+    return DenseRiemannianMetric(size, G, ∂G∂θ, map, _temp)
+end
+
+Base.size(e::DenseRiemannianMetric) = e.size
+Base.size(e::DenseRiemannianMetric, dim::Int) = e.size[dim]
+function Base.show(io::IO, drm::DenseRiemannianMetric)
+    return print(io, "DenseRiemannianMetric$(drm.size) with $(drm.map) metric")
+end
+
+function rand_momentum(
+    rng::Union{AbstractRNG,AbstractVector{<:AbstractRNG}},
+    metric::DenseRiemannianMetric{T},
+    kinetic,
+    θ::AbstractVecOrMat,
+) where {T}
+    r = _randn(rng, T, size(metric)...)
+    G⁻¹ = inv(metric.map(metric.G(θ)))
+    chol = cholesky(Symmetric(G⁻¹))
+    ldiv!(chol.U, r)
+    return r
+end
diff --git a/src/trajectory.jl b/src/trajectory.jl
index 66246e74..2e3c1d55 100644
--- a/src/trajectory.jl
+++ b/src/trajectory.jl
@@ -292,7 +292,7 @@ function transition(
             hamiltonian_energy=H,
             hamiltonian_energy_error=H - H0,
             # check numerical error in proposed phase point.
-            numerical_error=!all(isfinite, H′),
+            numerical_error=(!all(isfinite, H′)),
         ),
         stat(τ.integrator),
     )
@@ -727,7 +727,7 @@ function transition(
     (
         n_steps=tree.nα,
         is_accept=true,
-        acceptance_rate=tree.sum_α / tree.nα,
+        acceptance_rate=(tree.sum_α / tree.nα),
         log_density=zcand.ℓπ.value,
         hamiltonian_energy=H,
         hamiltonian_energy_error=H - H0,
diff --git a/test/Project.toml b/test/Project.toml
index 5b5191c4..9c9663bf 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -6,12 +6,14 @@ Bijectors = "76274a88-744f-5084-9051-94815aaf08c4"
 ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
 Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
 FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
+FiniteDiff = "6a86dc24-6348-571c-b903-95158fe2bd41"
 ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
 JET = "c3a54625-cd67-489e-a8e7-0a5a0ff4e31b"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 LogDensityProblems = "6fdf6af0-433a-55f7-b3ed-c6c6e0b8df7c"
 LogDensityProblemsAD = "996a588d-648d-4e1f-a8f0-a84b347e47b1"
 MCMCChains = "c7f686f2-ff18-58e9-bc7b-31028e88f75d"
+MCMCLogDensityProblems = "8a639fad-7908-4fe4-8003-906e9297f002"
 OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
 Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
diff --git a/test/riemannian.jl b/test/riemannian.jl
index 67b1cad0..f1152215 100644
--- a/test/riemannian.jl
+++ b/test/riemannian.jl
@@ -1,28 +1,63 @@
-using ReTest, AdvancedHMC
-
-include("../src/riemannian_hmc.jl")
-include("../src/riemannian_hmc_utility.jl")
-
+using ReTest, Random
+using AdvancedHMC, ForwardDiff, AbstractMCMC
+using LinearAlgebra
+using MCMCLogDensityProblems
 using FiniteDiff:
     finite_difference_gradient, finite_difference_hessian, finite_difference_jacobian
-using Distributions: MvNormal
-using AdvancedHMC: neg_energy, energy
+using AdvancedHMC: neg_energy, energy, ∂H∂θ, ∂H∂r
+
+# Fisher information metric
+function gen_∂G∂θ_fwd(Vfunc, x; f=identity)
+    _Hfunc = gen_hess_fwd(Vfunc, x)
+    Hfunc = x -> f(_Hfunc(x)[3])
+    # QUES What's the best output format of this function?
+    cfg = ForwardDiff.JacobianConfig(Hfunc, x)
+    d = length(x)
+    out = zeros(eltype(x), d^2, d)
+    # Output shape: vertically stacked Hessian slices [∂H∂x₁; ∂H∂x₂; ...];
+    # `ForwardDiff.jacobian!` fills and returns `out`.
+    return x -> ForwardDiff.jacobian!(out, Hfunc, x, cfg)
+end
+
+function gen_hess_fwd(func, x::AbstractVector)
+    function hess(x::AbstractVector)
+        return nothing, nothing, ForwardDiff.hessian(func, x)
+    end
+    return hess
+end
+
+function reshape_∂G∂θ(H)
+    d = size(H, 2)
+    return cat((H[((i - 1) * d + 1):(i * d), :] for i in 1:d)...; dims=3)
+end
 
-# Taken from https://github.com/JuliaDiff/FiniteDiff.jl/blob/master/test/finitedifftests.jl
-δ(a, b) = maximum(abs.(a - b))
+function prepare_sample(ℓπ, initial_θ, λ)
+    Vfunc = x -> -ℓπ(x)
+    _Hfunc = MCMCLogDensityProblems.gen_hess(Vfunc, initial_θ) # x -> (value, gradient, hessian)
+    Hfunc = x -> copy.(_Hfunc(x)) # _Hfunc computes in place; copy to avoid aliasing bugs
 
+    fstabilize = H -> H + λ * I
+    Gfunc = x -> begin
+        H = fstabilize(Hfunc(x)[3])
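+        # Fall back to the identity metric if the stabilized Hessian is not
+        # finite, so that evaluation can proceed in pathological regions.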
+        all(isfinite, H) ? H : diagm(ones(length(x)))
+    end
+    _∂G∂θfunc = gen_∂G∂θ_fwd(x -> -ℓπ(x), initial_θ; f=fstabilize)
+    ∂G∂θfunc = x -> reshape_∂G∂θ(_∂G∂θfunc(x))
+
+    return Vfunc, Hfunc, Gfunc, ∂G∂θfunc
+end
 
-@testset "Riemannian" begin
-    hps = (; λ=1e-2, α=20.0, ϵ=0.1, n=6, L=8)
+@testset "Constructors tests" begin
+    δ(a, b) = maximum(abs.(a - b))
     @testset "$(nameof(typeof(target)))" for target in [HighDimGaussian(2), Funnel()]
         rng = MersenneTwister(1110)
+        λ = 1e-2
         θ₀ = rand(rng, dim(target))
 
         ℓπ = MCMCLogDensityProblems.gen_logpdf(target)
         ∂ℓπ∂θ = MCMCLogDensityProblems.gen_logpdf_grad(target, θ₀)
 
-        Vfunc, Hfunc, Gfunc, ∂G∂θfunc = prepare_sample_target(hps, θ₀, ℓπ)
+        Vfunc, Hfunc, Gfunc, ∂G∂θfunc = prepare_sample(ℓπ, θ₀, λ)
 
         D = dim(target) # ==2 for this test
         x = zeros(D) # randn(rng, D)
@@ -36,7 +71,7 @@
         end
 
         @testset "$(nameof(typeof(hessmap)))" for hessmap in
-                                                  [IdentityMap(), SoftAbsMap(hps.α)]
+                                                  [IdentityMap(), SoftAbsMap(20.0)]
             metric = DenseRiemannianMetric((D,), Gfunc, ∂G∂θfunc, hessmap)
             kinetic = GaussianKinetic()
             hamiltonian = Hamiltonian(metric, kinetic, ℓπ, ∂ℓπ∂θ)
@@ -67,3 +102,62 @@
         end
     end
 end
+
+@testset "Multivariate Normal with Riemannian HMC" begin
+    # Set the number of samples to draw
+    n_samples = 2_000
+    rng = MersenneTwister(1110)
+
+    # Target: a low-dimensional Gaussian, as in the constructor tests above
+    target = HighDimGaussian(2)
+    D = dim(target)
+    ℓπ = MCMCLogDensityProblems.gen_logpdf(target)
+    initial_θ = rand(rng, D)
+    ∂ℓπ∂θ = MCMCLogDensityProblems.gen_logpdf_grad(target, initial_θ)
+    λ = 1e-2
+    _, _, G, ∂G∂θ = prepare_sample(ℓπ, initial_θ, λ)
+
+    # Define a Hamiltonian system
+    metric = DenseRiemannianMetric((D,), G, ∂G∂θ)
+    kinetic = GaussianKinetic()
+    hamiltonian = Hamiltonian(metric, kinetic, ℓπ, ∂ℓπ∂θ)
+
+    # Define a generalized leapfrog integrator with a fixed step size and a
+    # fixed number of fixed-point iterations
+    initial_ϵ = 0.01
+    integrator = GeneralizedLeapfrog(initial_ϵ, 6)
+
+    # Define an HMC kernel with an end-point trajectory sampler and a fixed
+    # number of integration steps
+    kernel = HMCKernel(Trajectory{EndPointTS}(integrator, FixedNSteps(8)))
+
+    # Run the sampler to draw samples from the specified Gaussian, where
+    # - `samples` will store the samples
+    # - `stats` will store diagnostic statistics for each sample
+    samples, stats = sample(rng, hamiltonian, kernel, initial_θ, n_samples; progress=true)
+    @test length(samples) == n_samples
+    @test length(stats) == n_samples
+end
+
+@testset "Multivariate Normal with Riemannian HMC (SoftAbs metric)" begin
+    # Set the number of samples to draw
+    n_samples = 2_000
+    rng = MersenneTwister(1110)
+
+    # Same Gaussian target as above
+    target = HighDimGaussian(2)
+    D = dim(target)
+    ℓπ = MCMCLogDensityProblems.gen_logpdf(target)
+    initial_θ = rand(rng, D)
+    ∂ℓπ∂θ = MCMCLogDensityProblems.gen_logpdf_grad(target, initial_θ)
+    λ = 1e-2
+    _, _, G, ∂G∂θ = prepare_sample(ℓπ, initial_θ, λ)
+
+    # Define a Hamiltonian system with the SoftAbs-mapped metric
+    metric = DenseRiemannianMetric((D,), G, ∂G∂θ, SoftAbsMap(20.0))
+    kinetic = GaussianKinetic()
+    hamiltonian = Hamiltonian(metric, kinetic, ℓπ, ∂ℓπ∂θ)
+
+    # Define a generalized leapfrog integrator with a fixed step size and a
+    # fixed number of fixed-point iterations
+    initial_ϵ = 0.01
+    integrator = GeneralizedLeapfrog(initial_ϵ, 6)
+
+    # Define an HMC kernel with an end-point trajectory sampler and a fixed
+    # number of integration steps
+    kernel = HMCKernel(Trajectory{EndPointTS}(integrator, FixedNSteps(8)))
+
+    # Run the sampler to draw samples from the specified Gaussian, where
+    # - `samples` will store the samples
+    # - `stats` will store diagnostic statistics for each sample
+    samples, stats = sample(rng, hamiltonian, kernel, initial_θ, n_samples; progress=true)
+    @test length(samples) == n_samples
+    @test length(stats) == n_samples
+end
diff --git a/test/runtests.jl b/test/runtests.jl
index d0fb6ea8..fa816e8b 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
|| GROUP == "AdvancedHMC" include("abstractmcmc.jl") include("mcmcchains.jl") include("constructors.jl") + include("riemannian.jl") retest(; dry=false, verbose=Inf) end