def make_regularizer(uncertainty=True):
  """Returns a normal KL-divergence regularizer, or None when uncertainty is off."""
  # `prior_stddev` is assumed to be defined in the enclosing scope.
  if uncertainty:
    return ed.regularizers.normal_kl_divergence(stddev=prior_stddev)
  else:
    return None
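# The constructor below calls rank1_utils.make_regularizer(regularizer, mean,
# stddev) with three arguments. A minimal sketch of what such a helper might
# look like, assuming the regularizer is selected by name and that
# ed.regularizers.normal_kl_divergence accepts a mean as well as a stddev; this
# is a hypothetical reconstruction, not the library's actual implementation.
def make_rank1_regularizer(regularizer, mean, stddev):
  """Returns a KL regularizer toward Normal(mean, stddev), or passes through."""
  if regularizer == "normal_kl_divergence":
    return ed.regularizers.normal_kl_divergence(mean=mean, stddev=stddev)
  return regularizer
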
        the LSTM cell, and for clipping of all aggregated gradients.
      return_sequences: Whether or not to return outputs at each time step from
        the LSTM, rather than just the final time step.
    """
    super().__init__()
    self.hidden_layer_dim = hidden_layer_dim

    # 1. RNN layer.
    cells = []
    for _ in range(num_rnn_layers):
      # TODO(dusenberrymw): Determine if a grad-clipped version is needed.
      lstm_cell = rank1_bnn_layers.LSTMCellRank1(
          rnn_dim,
          alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
          recurrent_alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          recurrent_gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
          alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          recurrent_alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          recurrent_gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          kernel_regularizer=tf.keras.regularizers.l2(l2),
          recurrent_regularizer=tf.keras.regularizers.l2(l2),
          bias_regularizer=tf.keras.regularizers.l2(l2),
          use_additive_perturbation=use_additive_perturbation,
          ensemble_size=ensemble_size)
      cells.append(lstm_cell)
    self.rnn_layer = tf.keras.layers.RNN(cells, return_sequences=False)

    # 2. Affine layer on combination of RNN output and context features.
    if self.hidden_layer_dim > 0:
      self.hidden_layer = rank1_bnn_layers.DenseRank1(
          self.hidden_layer_dim,
          activation=tf.nn.relu6,
          alpha_initializer=rank1_utils.make_initializer(
              alpha_initializer, random_sign_init, dropout_rate),
          gamma_initializer=rank1_utils.make_initializer(
              gamma_initializer, random_sign_init, dropout_rate),
          kernel_initializer="he_normal",
          alpha_regularizer=rank1_utils.make_regularizer(
              alpha_regularizer, prior_mean, prior_stddev),
          gamma_regularizer=rank1_utils.make_regularizer(
              gamma_regularizer, prior_mean, prior_stddev),
          kernel_regularizer=tf.keras.regularizers.l2(l2),
          bias_regularizer=tf.keras.regularizers.l2(l2),
          use_additive_perturbation=use_additive_perturbation,
          ensemble_size=ensemble_size)

    # 3. Output affine layer.
    self.output_layer = rank1_bnn_layers.DenseRank1(
        output_layer_dim,
        alpha_initializer=rank1_utils.make_initializer(
            alpha_initializer, random_sign_init, dropout_rate),
        gamma_initializer=rank1_utils.make_initializer(
            gamma_initializer, random_sign_init, dropout_rate),
        alpha_regularizer=rank1_utils.make_regularizer(
            alpha_regularizer, prior_mean, prior_stddev),
        gamma_regularizer=rank1_utils.make_regularizer(
            gamma_regularizer, prior_mean, prior_stddev),
        kernel_regularizer=tf.keras.regularizers.l2(l2),
        bias_regularizer=tf.keras.regularizers.l2(l2),
        use_additive_perturbation=use_additive_perturbation,
        ensemble_size=ensemble_size)
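
  # A minimal sketch of how these layers might be composed in a forward pass,
  # assuming inputs arrive as a (sequence, context) pair. The argument names and
  # the concatenation of the RNN output with context features are assumptions
  # based on the comment above, not the repository's actual call() method.
  def call(self, inputs, training=False):
    sequence, context = inputs
    # Summarize the time series with the rank-1 BNN LSTM stack.
    rnn_output = self.rnn_layer(sequence, training=training)
    # Combine the RNN summary with per-example context features.
    features = tf.concat([rnn_output, context], axis=-1)
    if self.hidden_layer_dim > 0:
      features = self.hidden_layer(features)
    return self.output_layer(features)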