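# The snippets below are fragments of CleverHans-based black-box attack and
# adversarial-training scripts (TensorFlow 1.x). A minimal set of imports they
# assume -- module paths follow CleverHans v2.x and may differ in other
# versions; MNISTModel, CIFARModel, and model_mnist are project-local model
# definitions, not CleverHans APIs:
import numpy as np
import tensorflow as tf
import keras.backend as K
from six.moves import xrange
from cleverhans.utils import to_categorical
from cleverhans.utils_tf import model_train, model_eval, batch_eval, tf_model_load
from cleverhans.attacks import FastGradientMethod, MadryEtAl
from cleverhans.attacks_tf import jacobian_graph, jacobian_augmentation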
# Define TF model graph
self.model = model_mnist()
self.predictions = self.model(self.x)
print("Defined TensorFlow model graph.")

def evaluate():
    # Evaluate the accuracy of the MNIST model on legitimate test examples
    accuracy = model_eval(self.sess, self.x, self.y, self.predictions,
                          self.X_test, self.Y_test)
    print('Test accuracy on legitimate test examples: ' + str(accuracy))

# Train the MNIST model
model_train(self.sess, self.x, self.y, self.predictions, X_train, Y_train, evaluate=evaluate)
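
# (A sketch of the session and graph inputs the later, non-class snippets
# assume; shapes shown for MNIST and would be (None, 32, 32, 3) with 10
# classes for CIFAR-10.)
sess = tf.Session()
K.set_session(sess)
x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
y = tf.placeholder(tf.float32, shape=(None, 10))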
model_sub = CIFARModel(use_log=True).model
preds_sub = model_sub(x)
print("Defined TensorFlow model graph for the substitute.")

# Define the Jacobian symbolically using TensorFlow
grads = jacobian_graph(preds_sub, x, nb_classes)

# Train the substitute and augment the dataset alternately
for rho in xrange(data_aug):
    print("Substitute training epoch #" + str(rho))
    train_params = {
        'nb_epochs': nb_epochs_s,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    model_train(sess, x, y, preds_sub, X_sub, to_categorical(Y_sub),
                init_all=False, verbose=False, args=train_params)

    # If we are not at the last substitute training iteration, augment dataset
    if rho < data_aug - 1:
        if FLAGS.cached_aug:
            augs = np.load('sub_saved/{}-aug-{}.npz'.format(DATASET, rho))
            X_sub = augs['X_sub']
            Y_sub = augs['Y_sub']
        else:
            print("Augmenting substitute training data.")
            # Perform the Jacobian augmentation
            X_sub = jacobian_augmentation(sess, x, X_sub, Y_sub, grads, lmbda)

            print("Labeling substitute training data.")
            # Label the newly generated synthetic points using the black-box.
            # hstack duplicates the labels; the copies for the new points are
            # meant to be overwritten with the black-box's predictions (the
            # querying step is truncated in this snippet).
            Y_sub = np.hstack([Y_sub, Y_sub])
model = CIFARModel(use_log=True).model
predictions = model(x)
print("Defined TensorFlow model graph.")

# Train a CIFAR model
if FLAGS.load_pretrain:
    # Use the restored CIFAR model
    tf_model_load(sess)
else:
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    model_train(sess, x, y, predictions, X_train, Y_train, verbose=True,
                save=True, args=train_params)

# Print out the accuracy on legitimate data
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, predictions, X_test, Y_test,
                      args=eval_params)
print('Test accuracy of black-box on legitimate test '
      'examples: ' + str(accuracy))
return model, predictions, accuracy
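
# (Hypothetical usage, assuming the fragment above is the body of a
# `prep_bbox`-style helper as in the CleverHans black-box tutorial:)
#   model, bbox_preds, acc = prep_bbox(sess, x, y, X_train, Y_train,
#                                      X_test, Y_test)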
preds_adv = None
if FLAGS.defense_type == 'adv_tr':
    attack_params = {'eps': FLAGS.fgsm_eps_tr,
                     'clip_min': 0.,
                     'clip_max': 1.}
    if gan:
        if gan.dataset_name == 'celeba':
            attack_params['clip_min'] = -1.0
    attack_obj = FastGradientMethod(model, sess=sess)
    adv_x_tr = attack_obj.generate(images_pl, **attack_params)
    adv_x_tr = tf.stop_gradient(adv_x_tr)
    preds_adv = model(adv_x_tr)

model_train(sess, images_pl, labels_pl, preds, train_images, train_labels,
            args=train_params, rng=rng, predictions_adv=preds_adv,
            init_all=False, feed={K.learning_phase(): 1},
            evaluate=evaluate)

# Calculate training error.
eval_params = {'batch_size': batch_size}
acc = model_eval(
    sess, images_pl, labels_pl, preds, train_images, train_labels,
    args=eval_params, feed={K.learning_phase(): 0},
)
print('[#] Accuracy on clean examples {}'.format(acc))

if attack_type is None:
    return acc, 0, None
# Initialize the Fast Gradient Sign Method (FGSM) attack object and
# graph.
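# (A minimal sketch of the step this comment introduces; the original snippet
# is truncated here. `FLAGS.fgsm_eps` is an assumed flag, named by analogy
# with `FLAGS.fgsm_eps_tr` above, and `test_images`/`test_labels` are assumed
# analogous to `train_images`/`train_labels`.)
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(images_pl, eps=FLAGS.fgsm_eps, clip_min=0., clip_max=1.)
preds_adv = model(adv_x)
acc_adv = model_eval(sess, images_pl, labels_pl, preds_adv,
                     test_images, test_labels, args=eval_params,
                     feed={K.learning_phase(): 0})
print('[#] Accuracy on adversarial examples {}'.format(acc_adv))
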
model_sub = MNISTModel(use_log=True).model
preds_sub = model_sub(x)
print("Defined TensorFlow model graph for the substitute.")

# Define the Jacobian symbolically using TensorFlow
grads = jacobian_graph(preds_sub, x, nb_classes)

# Train the substitute and augment the dataset alternately
for rho in xrange(data_aug):
    print("Substitute training epoch #" + str(rho))
    train_params = {
        'nb_epochs': nb_epochs_s,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    model_train(sess, x, y, preds_sub, X_sub, to_categorical(Y_sub),
                init_all=False, verbose=False, args=train_params)

    # If we are not at the last substitute training iteration, augment dataset
    if rho < data_aug - 1:
        if FLAGS.cached_aug:
            augs = np.load('sub_saved/mnist-aug-{}.npz'.format(rho))
            X_sub = augs['X_sub']
            Y_sub = augs['Y_sub']
        else:
            print("Augmenting substitute training data.")
            # Perform the Jacobian augmentation
            X_sub = jacobian_augmentation(sess, x, X_sub, Y_sub, grads, lmbda)

            print("Labeling substitute training data.")
            # Label the newly generated synthetic points using the black-box;
            # the duplicated labels for the new half are then overwritten with
            # the black-box's predictions (querying step truncated here)
            Y_sub = np.hstack([Y_sub, Y_sub])
model_sub = substitute_model
preds_sub = model_sub(x)
print("Defined TensorFlow model graph for the substitute.")

# Define the Jacobian symbolically using TensorFlow.
grads = jacobian_graph(preds_sub, x, nb_classes)

# Train the substitute and augment the dataset alternately.
for rho in xrange(data_aug):
    print("Substitute training epoch #" + str(rho))
    train_params = {
        'nb_epochs': nb_epochs_s,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    model_train(sess, x, y, preds_sub, X_sub, to_categorical(Y_sub),
                init_all=False, args=train_params,
                rng=rng, feed={K.learning_phase(): 1})

    # If we are not at the last substitute training iteration, augment dataset.
    if rho < data_aug - 1:
        print("Augmenting substitute training data.")
        # Perform the Jacobian augmentation.
        X_sub = jacobian_augmentation(sess, x, X_sub, Y_sub, grads, lmbda,
                                      feed={K.learning_phase(): 0})

        print("Labeling substitute training data.")
        # Label the newly generated synthetic points using the black-box.
        Y_sub = np.hstack([Y_sub, Y_sub])
        X_sub_prev = X_sub[int(len(X_sub) / 2):]
        eval_params = {'batch_size': batch_size}
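        # (Sketch of the truncated querying step, following the CleverHans
        # black-box tutorial; `bbox_preds` is assumed to be the black-box
        # model's output tensor.)
        bbox_val = batch_eval(sess, [x], [bbox_preds], [X_sub_prev],
                              args=eval_params)[0]
        # Keep only the label (argmax) of the black-box's predictions for
        # the newly generated points
        Y_sub[int(len(X_sub) / 2):] = np.argmax(bbox_val, axis=1)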
# Define TF model graph (for the black-box model)
model = MNISTModel(use_log=True).model
predictions = model(x)
print("Defined TensorFlow model graph.")

# Train an MNIST model
if FLAGS.load_pretrain:
    tf_model_load(sess)
else:
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    model_train(sess, x, y, predictions, X_train, Y_train, verbose=False,
                save=True, args=train_params)

# Print out the accuracy on legitimate data
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, predictions, X_test, Y_test,
                      args=eval_params)
print('Test accuracy of black-box on legitimate test '
      'examples: ' + str(accuracy))
return model, predictions, accuracy
rng = np.random.RandomState([2017, 8, 30])

if clean_train:
    model = make_madryetal()
    preds = model.get_probs(x)

    def evaluate():
        # Evaluate the accuracy of the MNIST model on legitimate test
        # examples
        eval_params = {'batch_size': batch_size}
        acc = model_eval(
            sess, x, y, preds, X_test, Y_test, args=eval_params)
        report.clean_train_clean_eval = acc
        assert X_test.shape[0] == test_end - test_start, X_test.shape
        print('Test accuracy on legitimate examples: %0.4f' % acc)

    model_train(sess, x, y, preds, X_train, Y_train, evaluate=evaluate,
                args=train_params, rng=rng)

    # Calculate training error
    if testing:
        eval_params = {'batch_size': batch_size}
        acc = model_eval(
            sess, x, y, preds, X_train, Y_train, args=eval_params)
        report.train_clean_train_clean_eval = acc

    # Initialize the Madry et al. (PGD) attack object and graph
    attack = MadryEtAl(model, sess=sess)
    adv_x = attack.generate(x, **attack_params)
    preds_adv = model.get_probs(adv_x)

    # Evaluate the accuracy of the MNIST model on adversarial examples
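    # (A minimal sketch of the adversarial evaluation the comment above
    # introduces; the snippet is truncated here. The report field name follows
    # the CleverHans AccuracyReport convention.)
    eval_par = {'batch_size': batch_size}
    acc = model_eval(sess, x, y, preds_adv, X_test, Y_test, args=eval_par)
    print('Test accuracy on adversarial examples: %0.4f' % acc)
    report.clean_train_adv_eval = acc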
if FLAGS.dataset == 'mnist':  # condition reconstructed: snippet starts mid-conditional
    model = MNISTModel(use_log=True).model
else:
    model = CIFARModel(use_log=True).model
predictions = model(x)
print("Defined TensorFlow model graph.")

# Train the black-box model
if FLAGS.load_pretrain:
    tf_model_load(sess)
else:
    train_params = {
        'nb_epochs': nb_epochs,
        'batch_size': batch_size,
        'learning_rate': learning_rate
    }
    model_train(sess, x, y, predictions, X_train, Y_train, verbose=True,
                save=True, args=train_params)

# Print out the accuracy on legitimate data
eval_params = {'batch_size': batch_size}
accuracy = model_eval(sess, x, y, predictions, X_test, Y_test,
                      args=eval_params)
print('Test accuracy of black-box on legitimate test '
      'examples: ' + str(accuracy))
return model, predictions, accuracy
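
# (A minimal end-to-end sketch of how these pieces combine, following the
# CleverHans black-box tutorial: craft adversarial examples on the trained
# substitute and measure how well they transfer to the black box. The
# `fgsm_par` values are illustrative.)
fgsm_par = {'eps': 0.3, 'ord': np.inf, 'clip_min': 0., 'clip_max': 1.}
fgsm = FastGradientMethod(model_sub, sess=sess)
x_adv_sub = fgsm.generate(x, **fgsm_par)

# Evaluate the black-box's accuracy on the substitute's adversarial examples
accuracy = model_eval(sess, x, y, model(x_adv_sub), X_test, Y_test,
                      args={'batch_size': batch_size})
print('Test accuracy of black-box on adversarial examples: ' + str(accuracy))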