@@ -65,17 +65,18 @@ f = GP(Matern52Kernel())
 # md nothing #hide

 # We create a finite dimensional projection at the inputs of the training dataset
-# observed under Gaussian noise with variance $\sigma^2 = 0.1$, and compute the
+# observed under Gaussian noise with variance `noise_var` = 0.1, and compute the
 # log-likelihood of the outputs of the training dataset.

-fx = f(x_train, 0.1)
+noise_var = 0.1
+fx = f(x_train, noise_var)
 logpdf(fx, y_train)

 # We compute the posterior Gaussian process given the training data, and calculate the
 # log-likelihood of the test dataset.

 p_fx = posterior(fx, y_train)
-logpdf(p_fx(x_test), y_test)
+logpdf(p_fx(x_test, noise_var), y_test)

 # We plot the posterior Gaussian process (its mean and a ribbon of 2 standard deviations
 # around it) on a grid along with the observations.
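The key change in this hunk is threading a single `noise_var` through both the training and the test projections. Passing it at test time matters: `p_fx(x_test, noise_var)` scores `y_test` under the noisy predictive distribution, whereas the old `p_fx(x_test)` scored it against the noise-free latent process. A minimal self-contained sketch of the pattern, assuming AbstractGPs.jl is loaded and the tutorial's `x_train`/`y_train`/`x_test`/`y_test` are in scope:

```julia
using AbstractGPs  # re-exports Matern52Kernel from KernelFunctions.jl

noise_var = 0.1                  # observation-noise variance, shared everywhere

f = GP(Matern52Kernel())         # zero-mean GP prior
fx = f(x_train, noise_var)       # finite projection with noise on the diagonal
logpdf(fx, y_train)              # log marginal likelihood of the training data

p_fx = posterior(fx, y_train)    # exact GP posterior

# Including noise_var here evaluates y_test under the noisy predictive
# distribution; p_fx(x_test) alone would use the noise-free latent GP.
logpdf(p_fx(x_test, noise_var), y_test)
```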
@@ -111,7 +112,7 @@ function gp_loglikelihood(x, y)
         kernel =
             softplus(params[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(params[2])))
         f = GP(kernel)
-        fx = f(x, 0.1)
+        fx = f(x, noise_var)
         return logpdf(fx, y)
     end
     return loglikelihood
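For context, `gp_loglikelihood` returns a closure over unconstrained parameters; `softplus(x) = log(1 + exp(x))` maps them to a positive kernel variance and a positive inverse lengthscale. A hypothetical call, with arbitrary parameter values that are not from the tutorial:

```julia
loglik = gp_loglikelihood(x_train, y_train)

θ = [0.0, 0.0]   # softplus(0.0) ≈ 0.693 for both kernel parameters
loglik(θ)        # log marginal likelihood at these (transformed) parameters
```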
@@ -229,10 +230,10 @@ vline!(mean_samples'; linewidth=2)
 function gp_posterior(x, y, p)
     kernel = softplus(p[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(p[2])))
     f = GP(kernel)
-    return posterior(f(x, 0.1), y)
+    return posterior(f(x, noise_var), y)
 end

-mean(logpdf(gp_posterior(x_train, y_train, p)(x_test), y_test) for p in samples)
+mean(logpdf(gp_posterior(x_train, y_train, p)(x_test, noise_var), y_test) for p in samples)

 # We sample 5 functions from each posterior GP given by the final 100 samples of kernel
 # parameters.
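One way the sampling step announced in that comment could look, assuming `samples` holds the parameter draws, `x_plot` is a plotting grid, and Plots.jl is loaded (none of which appear in this diff):

```julia
for p in samples[(end - 99):end]          # final 100 parameter draws
    p_fx = gp_posterior(x_train, y_train, p)
    # 5 function draws per posterior; a tiny jitter keeps the Cholesky stable
    plot!(x_plot, rand(p_fx(x_plot, 1e-9), 5); alpha=0.2, label="")
end
```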
@@ -385,7 +386,7 @@ function objective_function(x, y)
         kernel =
             softplus(params[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(params[2])))
         f = GP(kernel)
-        fx = f(x, 0.1)
+        fx = f(x, noise_var)
         z = logistic.(params[3:end])
         approx = VFE(f(z, jitter))
         return -elbo(approx, fx, y)
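`objective_function` returns the negative ELBO of the VFE (sparse variational) approximation, with `logistic` keeping the inducing inputs `z` in the unit interval. A hedged sketch of how it might be minimized with Optim.jl, consistent with the `opt.minimizer` used in the next hunk; the inducing-point count `n_z` and the zero initialization are illustrative, not the tutorial's exact choices:

```julia
using Optim

n_z = 20                                 # hypothetical number of inducing points
θ0 = zeros(2 + n_z)                      # 2 kernel parameters + inducing inputs
opt = optimize(objective_function(x_train, y_train), θ0, LBFGS())
opt.minimizer                            # unconstrained optimum, transformed below
```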
@@ -420,9 +421,9 @@ opt_kernel =
     softplus(opt.minimizer[1]) *
     (Matern52Kernel() ∘ ScaleTransform(softplus(opt.minimizer[2])))
 opt_f = GP(opt_kernel)
-opt_fx = opt_f(x_train, 0.1)
+opt_fx = opt_f(x_train, noise_var)
 ap = posterior(VFE(opt_f(logistic.(opt.minimizer[3:end]), jitter)), opt_fx, y_train)
-logpdf(ap(x_test), y_test)
+logpdf(ap(x_test, noise_var), y_test)

 # We visualize the approximate posterior with optimized parameters.

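A sketch of the visualization mentioned above, assuming Plots.jl and a grid `x_plot`; `mean` and `var` on a `FiniteGP` give the pointwise posterior moments used for the two-standard-deviation ribbon:

```julia
using Plots

fx_plot = ap(x_plot)
m, s = mean(fx_plot), sqrt.(var(fx_plot))
plot(x_plot, m; ribbon=2s, label="approximate posterior")
scatter!(x_train, y_train; markersize=2, label="observations")
```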
@@ -460,7 +461,7 @@ function loss_function(x, y)
         kernel =
             softplus(params[1]) * (Matern52Kernel() ∘ ScaleTransform(softplus(params[2])))
         f = GP(kernel)
-        fx = f(x, 0.1)
+        fx = f(x, noise_var)
         return -logpdf(fx, y)
     end
     return negativelogmarginallikelihood
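`loss_function` mirrors `gp_loglikelihood` but returns the negative log marginal likelihood for minimization. A quick differentiability check, assuming Zygote.jl (an assumption, not shown in this diff), which is useful before handing the objective to a gradient-based optimizer:

```julia
using Zygote

nlml = loss_function(x_train, y_train)
θ0 = [0.0, 0.0]                  # arbitrary unconstrained kernel parameters
nlml(θ0)                         # negative log marginal likelihood
only(Zygote.gradient(nlml, θ0))  # its gradient, e.g. for LBFGS
```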
@@ -496,9 +497,9 @@ opt_kernel =
     (Matern52Kernel() ∘ ScaleTransform(softplus(opt.minimizer[2])))

 opt_f = GP(opt_kernel)
-opt_fx = opt_f(x_train, 0.1)
+opt_fx = opt_f(x_train, noise_var)
 opt_p_fx = posterior(opt_fx, y_train)
-logpdf(opt_p_fx(x_test), y_test)
+logpdf(opt_p_fx(x_test, noise_var), y_test)

 # We visualize the posterior with optimized parameters.
