Bayesian Neural Networks with Variational Inference, Explained (Keras Implementation)


Copyright notice: This is an original article by the blogger, released under the CC 4.0 BY-SA license. Please include the original source link and this notice when reposting.
Original link: https://blog.csdn.net/weixin_38314865/article/details/98468583

The code below follows this tutorial: http://krasserm.github.io/2019/03/14/bayesian-neural-networks/
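In short, the code implements Bayes by Backprop: each weight gets a Gaussian variational posterior $q(\mathbf{w}\mid\theta)$ and a scale-mixture Gaussian prior $p(\mathbf{w})$, and training minimizes the variational free energy (the negative ELBO)

$$
\mathcal{F}(\mathcal{D},\theta) = \mathrm{KL}\big[\,q(\mathbf{w}\mid\theta)\,\|\,p(\mathbf{w})\,\big] - \mathbb{E}_{q(\mathbf{w}\mid\theta)}\big[\log p(\mathcal{D}\mid\mathbf{w})\big].
$$

In the code, the KL term is approximated with a single Monte Carlo weight sample, $\log q(\mathbf{w}\mid\theta) - \log p(\mathbf{w})$, and added as a layer loss inside `DenseVariational`; the expected log-likelihood term is the `neg_log_likelihood` loss passed to `model.compile`.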

```python
import numpy as np
import matplotlib.pyplot as plt

%matplotlib inline

def f(x, sigma):
    epsilon = np.random.randn(*x.shape) * sigma
    return 10 * np.sin(2 * np.pi * (x)) + epsilon

train_size = 32
noise = 1.0

X = np.linspace(-0.5, 0.5, train_size).reshape(-1, 1)
y = f(X, sigma=noise)
y_true = f(X, sigma=0.0)

plt.scatter(X, y, marker='+', label='Training data')
plt.plot(X, y_true, label='Truth')
plt.title('Noisy training data and ground truth')
plt.legend();
```

(Figure: noisy training data and ground truth)

```python
from keras import backend as K
from keras import activations, initializers
from keras.layers import Layer

import tensorflow as tf
# Note: tf.distributions is the TF 1.x API; with TF 2.x the equivalent
# distributions live in tensorflow_probability (tfp.distributions).

def mixture_prior_params(sigma_1, sigma_2, pi, return_sigma=False):
    params = K.variable([sigma_1, sigma_2, pi], name='mixture_prior_params')
    sigma = np.sqrt(pi * sigma_1 ** 2 + (1 - pi) * sigma_2 ** 2)
    return params, sigma

def log_mixture_prior_prob(w):
    # Scale-mixture Gaussian prior: pi * N(0, sigma_1) + (1 - pi) * N(0, sigma_2)
    comp_1_dist = tf.distributions.Normal(0.0, prior_params[0])
    comp_2_dist = tf.distributions.Normal(0.0, prior_params[1])
    comp_1_weight = prior_params[2]
    return K.log(comp_1_weight * comp_1_dist.prob(w) + (1 - comp_1_weight) * comp_2_dist.prob(w))

# Mixture prior parameters shared across DenseVariational layer instances
prior_params, prior_sigma = mixture_prior_params(sigma_1=1.0, sigma_2=0.1, pi=0.2)

class DenseVariational(Layer):
    def __init__(self, output_dim, kl_loss_weight, activation=None, **kwargs):
        self.output_dim = output_dim
        self.kl_loss_weight = kl_loss_weight
        self.activation = activations.get(activation)
        super().__init__(**kwargs)

    def build(self, input_shape):
        self._trainable_weights.append(prior_params)

        # Variational posterior parameters: mean mu and (pre-softplus) scale rho
        self.kernel_mu = self.add_weight(name='kernel_mu',
                                         shape=(input_shape[1], self.output_dim),
                                         initializer=initializers.normal(stddev=prior_sigma),
                                         trainable=True)
        self.bias_mu = self.add_weight(name='bias_mu',
                                       shape=(self.output_dim,),
                                       initializer=initializers.normal(stddev=prior_sigma),
                                       trainable=True)
        self.kernel_rho = self.add_weight(name='kernel_rho',
                                          shape=(input_shape[1], self.output_dim),
                                          initializer=initializers.constant(0.0),
                                          trainable=True)
        self.bias_rho = self.add_weight(name='bias_rho',
                                        shape=(self.output_dim,),
                                        initializer=initializers.constant(0.0),
                                        trainable=True)
        super().build(input_shape)

    def call(self, x):
        # Reparameterization trick: w = mu + softplus(rho) * eps, eps ~ N(0, 1)
        kernel_sigma = tf.math.softplus(self.kernel_rho)
        kernel = self.kernel_mu + kernel_sigma * tf.random.normal(self.kernel_mu.shape)

        bias_sigma = tf.math.softplus(self.bias_rho)
        bias = self.bias_mu + bias_sigma * tf.random.normal(self.bias_mu.shape)

        # Add the KL term (single-sample Monte Carlo estimate) as a layer loss
        self.add_loss(self.kl_loss(kernel, self.kernel_mu, kernel_sigma) +
                      self.kl_loss(bias, self.bias_mu, bias_sigma))

        return self.activation(K.dot(x, kernel) + bias)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)

    def kl_loss(self, w, mu, sigma):
        # KL between the variational posterior and the mixture prior, scaled per batch
        variational_dist = tf.distributions.Normal(mu, sigma)
        return self.kl_loss_weight * K.sum(variational_dist.log_prob(w) - log_mixture_prior_prob(w))
```
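The `call` method samples weights with the reparameterization trick, $w = \mu + \mathrm{softplus}(\rho)\,\epsilon$ with $\epsilon \sim \mathcal{N}(0,1)$, so gradients can flow into $\mu$ and $\rho$ while the standard deviation stays positive. A minimal standalone NumPy sketch of the same idea (the names here are illustrative, not from the original code):

```python
import numpy as np

def softplus(x):
    # softplus keeps the standard deviation strictly positive
    return np.log1p(np.exp(x))

rng = np.random.default_rng(0)
mu, rho = 0.3, -1.0                      # hypothetical variational parameters
sigma = softplus(rho)                    # softplus(-1.0) ~= 0.3133
samples = mu + sigma * rng.standard_normal(100_000)
print(samples.mean(), samples.std())     # ~= mu and sigma, as expected
```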

```python
from keras.layers import Input
from keras.models import Model

batch_size = train_size
num_batches = train_size / batch_size
kl_loss_weight = 1.0 / num_batches

x_in = Input(shape=(1,))
x = DenseVariational(20, kl_loss_weight=kl_loss_weight, activation='relu')(x_in)
x = DenseVariational(20, kl_loss_weight=kl_loss_weight, activation='relu')(x)
x = DenseVariational(1, kl_loss_weight=kl_loss_weight)(x)

model = Model(x_in, x)
```
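Here `batch_size` equals `train_size`, so `num_batches` is 1 and `kl_loss_weight` is simply 1.0. More generally, with $M$ mini-batches per epoch the KL term is added once per batch, so weighting it by $1/M$ keeps the full-dataset objective

$$
\mathcal{F} \approx \sum_{i=1}^{M}\left(\frac{1}{M}\,\mathrm{KL}\big[q(\mathbf{w}\mid\theta)\,\|\,p(\mathbf{w})\big] - \log p(\mathcal{D}_i\mid\mathbf{w})\right),
$$

i.e. the KL penalty is counted exactly once per pass over the data.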

```python
from keras import callbacks, optimizers

def neg_log_likelihood(y_obs, y_pred, sigma=noise):
    dist = tf.distributions.Normal(loc=y_pred, scale=sigma)
    return K.sum(-dist.log_prob(y_obs))

model.compile(loss=neg_log_likelihood, optimizer=optimizers.Adam(lr=0.03), metrics=['mse'])
model.fit(X, y, batch_size=batch_size, epochs=1500, verbose=0);
```
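With the observation noise fixed at `sigma=noise`, the per-sample negative log-likelihood is

$$
-\log \mathcal{N}(y \mid \hat{y}, \sigma^2) = \frac{(y-\hat{y})^2}{2\sigma^2} + \frac{1}{2}\log(2\pi\sigma^2),
$$

i.e. a scaled squared error plus a constant, which is why `mse` is a sensible metric to monitor alongside this loss.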

```python
import tqdm

X_test = np.linspace(-1.5, 1.5, 1000).reshape(-1, 1)
y_pred_list = []

for i in tqdm.tqdm(range(500)):
    y_pred = model.predict(X_test)
    y_pred_list.append(y_pred)

y_preds = np.concatenate(y_pred_list, axis=1)

y_mean = np.mean(y_preds, axis=1)
y_sigma = np.std(y_preds, axis=1)

plt.plot(X_test, y_mean, 'r-', label='Predictive mean');
plt.scatter(X, y, marker='+', label='Training data')
plt.fill_between(X_test.ravel(),
                 y_mean + 2 * y_sigma,
                 y_mean - 2 * y_sigma,
                 alpha=0.5, label='Epistemic uncertainty')
plt.title('Prediction')
plt.legend();
```

(Figure: posterior predictive mean and ±2σ epistemic uncertainty band, with training data)
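Each call to `model.predict` draws a fresh set of weights from the variational posterior, so the 500 forward passes form a Monte Carlo estimate of the posterior predictive mean,

$$
\mathbb{E}[y^{*} \mid x^{*}, \mathcal{D}] \approx \frac{1}{T}\sum_{t=1}^{T} f(x^{*}, \mathbf{w}_t), \qquad \mathbf{w}_t \sim q(\mathbf{w}\mid\theta),\; T = 500,
$$

and `y_sigma` is the sample standard deviation of those predictions, i.e. the epistemic part of the predictive uncertainty (the fixed observation noise is not included in the shaded band).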

 
