
Commit 8463fd6

Fixed bug in docs as reported in #329 (#330)

* Fixed bug in docs as reported in #329
* Removed usage of the removed `stack` function in favour of `Stacked` in the docs example, as reported in #329

1 parent 70fb426 commit 8463fd6

1 file changed, +6 -6 lines changed

docs/src/examples.md
Lines changed: 6 additions & 6 deletions

````diff
@@ -101,7 +101,7 @@ Similarily to the multivariate ADVI example, we could use `Stacked` to get a _bo
 ```@repl normalizing-flows
 d = MvNormal(zeros(2), ones(2));
 ibs = inverse.(bijector.((InverseGamma(2, 3), Beta())));
-sb = stack(ibs...) # == Stacked(ibs) == Stacked(ibs, [i:i for i = 1:length(ibs)]
+sb = Stacked(ibs) # == Stacked(ibs, [i:i for i = 1:length(ibs)]
 b = sb ∘ PlanarLayer(2)
 td = transformed(d, b);
 y = rand(rng, td)
````
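For context, `Stacked` in Bijectors.jl combines per-dimension bijectors into one blocked bijector; the `stack` helper this example previously relied on has since been removed from the package (per the commit message). A minimal sketch of the fixed construction, assuming Bijectors.jl and Distributions.jl are loaded (variable names follow the docs example):

```julia
using Bijectors, Distributions

# The inverse bijectors map from unconstrained ℝ back onto each
# distribution's support.
ibs = inverse.(bijector.((InverseGamma(2, 3), Beta())))

# Stacked(ibs) applies ibs[1] to dimension 1 and ibs[2] to dimension 2;
# it defaults to the ranges [i:i for i = 1:length(ibs)].
sb = Stacked(ibs)

x = randn(2)
y = sb(x)  # y[1] > 0 (InverseGamma support), 0 < y[2] < 1 (Beta support)
```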
```diff
@@ -128,7 +128,7 @@ struct NLLObjective{R,D,T}
     data::T
 end
 
-function (obj::NLLObjective)(θs...)
+function (obj::NLLObjective)(θs)
     transformed_dist = transformed(obj.basedist, obj.reconstruct(θs))
     return -sum(Base.Fix1(logpdf, transformed_dist), eachcol(obj.data))
 end
```
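The signature change matters because `Zygote.gradient(f, args...)` returns one gradient per argument, packed in a tuple. Taking the parameters as a single container means the call becomes `Zygote.gradient(f, θs)` and the lone gradient is destructured with `(∇s,) = ...`. A small standalone sketch, using a hypothetical function `g` rather than anything from the docs:

```julia
using Zygote

g(x) = sum(abs2, x)

grads = Zygote.gradient(g, [1.0, 2.0, 3.0])  # 1-tuple: one gradient per argument
(∇,) = grads                                 # destructure the single gradient
∇ == [2.0, 4.0, 6.0]                         # true: d/dx sum(x.^2) = 2x
```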
```diff
@@ -140,19 +140,19 @@ xs = randn(2, 1000);
 f = NLLObjective(reconstruct, MvNormal(2, 1), xs);
 
 # Initial loss.
-@info "Initial loss: $(f(θs...))"
+@info "Initial loss: $(f(θs))"
 
 # Train using gradient descent.
 ε = 1e-3;
 for i in 1:100
-    ∇s = Zygote.gradient(f, θs...)
-    θs = map(θs, ∇s) do θ, ∇
+    (∇s,) = Zygote.gradient(f, θs)
+    θs = fmap(θs, ∇s) do θ, ∇
         θ - ε .* ∇
     end
 end
 
 # Final loss
-@info "Finall loss: $(f(θs...))"
+@info "Final loss: $(f(θs))"
 
 # Very simple check to see if we learned something useful.
 samples = rand(transformed(f.basedist, f.reconstruct(θs)), 1000);
```
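The switch from `map` to `fmap` makes the update rule work on nested parameter structures: `fmap` (from Functors.jl) walks the matching leaves of two trees in lockstep, so every array inside `θs` gets updated regardless of nesting. A minimal sketch with hypothetical parameter values, assuming `fmap` from Functors.jl is in scope:

```julia
using Functors

# Two parallel trees: parameters and their gradients.
θs = (W = [1.0 2.0; 3.0 4.0], b = [0.5, -0.5])
∇s = (W = [0.1 0.1; 0.1 0.1], b = [0.2, 0.2])

ε = 1e-3
# fmap visits corresponding leaves (here, arrays) of both trees
# and applies the update rule to each pair.
θs = fmap((θ, ∇) -> θ - ε .* ∇, θs, ∇s)
```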
