@@ -101,7 +101,7 @@ Similarly to the multivariate ADVI example, we could use `Stacked` to get a _bo
 ```@repl normalizing-flows
 d = MvNormal(zeros(2), ones(2));
 ibs = inverse.(bijector.((InverseGamma(2, 3), Beta())));
-sb = stack(ibs...) # == Stacked(ibs) == Stacked(ibs, [i:i for i = 1:length(ibs)])
+sb = Stacked(ibs) # == Stacked(ibs, [i:i for i = 1:length(ibs)])
 b = sb ∘ PlanarLayer(2)
 td = transformed(d, b);
 y = rand(rng, td)
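(For context on the change above: `Stacked(ibs)` pairs each bijector with one coordinate of the input, so the first output stays positive, matching the InverseGamma support, and the second stays in (0, 1), matching the Beta support. A minimal sketch, assuming Bijectors.jl's callable-transform interface; the input values are illustrative, not from the original document.)

```julia
using Bijectors, Distributions

ibs = inverse.(bijector.((InverseGamma(2, 3), Beta())))
sb = Stacked(ibs)          # same as Stacked(ibs, [1:1, 2:2])

x = randn(2)               # unconstrained input
y = sb(x)                  # y[1] > 0, 0 < y[2] < 1
```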
@@ -128,7 +128,7 @@ struct NLLObjective{R,D,T}
     data::T
 end

-function (obj::NLLObjective)(θs...)
+function (obj::NLLObjective)(θs)
     transformed_dist = transformed(obj.basedist, obj.reconstruct(θs))
     return -sum(Base.Fix1(logpdf, transformed_dist), eachcol(obj.data))
 end
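(Side note on the unchanged body: `Base.Fix1(logpdf, transformed_dist)` is simply `x -> logpdf(transformed_dist, x)`, so the objective is the negative log-likelihood summed over the columns of `data`. A small standalone check, with a hypothetical distribution and data:)

```julia
using Distributions

d = MvNormal(zeros(2), ones(2))
data = randn(2, 5)

# Both spellings compute the same negative log-likelihood.
nll_a = -sum(Base.Fix1(logpdf, d), eachcol(data))
nll_b = -sum(logpdf(d, x) for x in eachcol(data))
nll_a ≈ nll_b    # true
```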
@@ -140,19 +140,19 @@ xs = randn(2, 1000);
 f = NLLObjective(reconstruct, MvNormal(2, 1), xs);

 # Initial loss.
-@info "Initial loss: $(f(θs...))"
+@info "Initial loss: $(f(θs))"

 # Train using gradient descent.
 ε = 1e-3;
 for i in 1:100
-    ∇s = Zygote.gradient(f, θs...)
-    θs = map(θs, ∇s) do θ, ∇
+    (∇s,) = Zygote.gradient(f, θs)
+    θs = fmap(θs, ∇s) do θ, ∇
         θ - ε .* ∇
     end
 end

 # Final loss
-@info "Finall loss: $(f(θs...))"
+@info "Final loss: $(f(θs))"

 # Very simple check to see if we learned something useful.
 samples = rand(transformed(f.basedist, f.reconstruct(θs)), 1000);
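(The corrected loop relies on two things: `Zygote.gradient(f, θs)` returns a one-element tuple whose entry mirrors the structure of `θs`, and `Functors.fmap` can walk two matching structures in lockstep. A minimal self-contained sketch of that update step, with hypothetical parameters and loss, not the document's model:)

```julia
using Functors, Zygote

θs = (w = randn(2), b = randn(2))          # hypothetical parameter container
loss(θ) = sum(abs2, θ.w .+ θ.b)            # hypothetical loss

ε = 1e-3
(∇s,) = Zygote.gradient(loss, θs)          # one gradient per positional argument
θs = fmap(θs, ∇s) do θ, ∇                  # pairwise over matching leaves
    θ - ε .* ∇
end
```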