# NOTE(review): stray web-scrape banner text, not part of the program — kept as a comment:
# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
'(reference gives correct base).'.format(train_benchmark))
# Report naive-baseline accuracies (predicting the reference base) so the
# trained model's metrics below can be judged against them.
print('benchmark accuracy on validation set {}, '
'(reference gives correct base).'.format(validate_benchmark))
print('benchmark accuracy on test set {}, '
'(reference gives correct base).'.format(test_benchmark))
# Seed NumPy for reproducible batch sampling.
# NOTE(review): this seeds only NumPy; Keras/TF weight initialization uses its
# own RNG, so runs may still differ — confirm if full reproducibility is needed.
np.random.seed(0)
# setup model: infer per-timestep feature width from the training matrix and
# build an LSTM with a fixed (batch, window, features) input shape.
feature_length = X_train.shape[1]
batch_shape = (batch_size, window_size, feature_length)
model = build_lstm(batch_shape)
model.summary()
# setup data generators — presumably *params carries (batch_size, window_size, ...);
# verify against serve_sample_batch's signature.
train_generator = serve_sample_batch(X_train, y_train, *params)
validate_generator = serve_sample_batch(X_validate, y_validate, *params)
test_generator = serve_sample_batch(X_test, y_test, *params)
# setup callbacks: tab-separated per-epoch metrics log, plus a model checkpoint
# written every epoch (epoch number templated into the .h5 filename).
training_outfile = '_'.join([out_prefix, 'training_history.txt'])
csv_logger = CSVLogger(training_outfile, separator='\t')
checkpoint_out = '_'.join([out_prefix, 'checkpoint_model.epoch{epoch:02d}.h5'])
model_checkpoint = ModelCheckpoint(checkpoint_out)
callbacks = [csv_logger, model_checkpoint]
# train model
# NOTE(review): Model.fit_generator is deprecated in TF2 Keras (Model.fit
# accepts generators directly); also this setup/train section appears
# duplicated verbatim later in the file — likely a bad merge/paste.
model.fit_generator(train_generator, steps_per_epoch=train_steps,
epochs=epochs, validation_data=validate_generator,
validation_steps=validate_steps, callbacks=callbacks,
workers=threads, class_weight=class_weights)
# NOTE(review): this entire section is a verbatim duplicate of the preceding
# setup/train section (same seeds, same output filenames — the second run would
# overwrite the first's history log and checkpoints). Likely a merge/paste
# error; confirm intent before removing.
# Report naive-baseline accuracies (predicting the reference base) for comparison.
print('benchmark accuracy on validation set {}, '
'(reference gives correct base).'.format(validate_benchmark))
print('benchmark accuracy on test set {}, '
'(reference gives correct base).'.format(test_benchmark))
# Seed NumPy for reproducible batch sampling (NumPy RNG only — see note above
# about framework RNGs in the original review pass; confirm if needed).
np.random.seed(0)
# setup model: rebuild the LSTM from scratch with the same fixed batch shape.
feature_length = X_train.shape[1]
batch_shape = (batch_size, window_size, feature_length)
model = build_lstm(batch_shape)
model.summary()
# setup data generators — presumably *params carries (batch_size, window_size, ...);
# verify against serve_sample_batch's signature.
train_generator = serve_sample_batch(X_train, y_train, *params)
validate_generator = serve_sample_batch(X_validate, y_validate, *params)
test_generator = serve_sample_batch(X_test, y_test, *params)
# setup callbacks: per-epoch TSV metrics log and per-epoch .h5 checkpoints.
training_outfile = '_'.join([out_prefix, 'training_history.txt'])
csv_logger = CSVLogger(training_outfile, separator='\t')
checkpoint_out = '_'.join([out_prefix, 'checkpoint_model.epoch{epoch:02d}.h5'])
model_checkpoint = ModelCheckpoint(checkpoint_out)
callbacks = [csv_logger, model_checkpoint]
# train model (fit_generator is deprecated in TF2 Keras — Model.fit accepts generators).
model.fit_generator(train_generator, steps_per_epoch=train_steps,
epochs=epochs, validation_data=validate_generator,
validation_steps=validate_steps, callbacks=callbacks,
workers=threads, class_weight=class_weights)
# report final results on test data
'(reference gives correct base).'.format(validate_benchmark))
# NOTE(review): third verbatim duplicate of the setup/train section — same
# seed and same output filenames, so it clobbers earlier runs' logs and
# checkpoints. Likely a merge/paste error; confirm intent before removing.
# Report the naive-baseline (reference-base) accuracy for the test set.
print('benchmark accuracy on test set {}, '
'(reference gives correct base).'.format(test_benchmark))
# Seed NumPy for reproducible batch sampling (NumPy RNG only; framework RNGs
# are not seeded here — confirm if full reproducibility is required).
np.random.seed(0)
# setup model: rebuild the LSTM with a fixed (batch, window, features) shape.
feature_length = X_train.shape[1]
batch_shape = (batch_size, window_size, feature_length)
model = build_lstm(batch_shape)
model.summary()
# setup data generators — presumably *params carries (batch_size, window_size, ...);
# verify against serve_sample_batch's signature. test_generator is consumed by
# the evaluate_generator call that follows this section.
train_generator = serve_sample_batch(X_train, y_train, *params)
validate_generator = serve_sample_batch(X_validate, y_validate, *params)
test_generator = serve_sample_batch(X_test, y_test, *params)
# setup callbacks: per-epoch TSV metrics log and per-epoch .h5 checkpoints
# (epoch number templated into the filename).
training_outfile = '_'.join([out_prefix, 'training_history.txt'])
csv_logger = CSVLogger(training_outfile, separator='\t')
checkpoint_out = '_'.join([out_prefix, 'checkpoint_model.epoch{epoch:02d}.h5'])
model_checkpoint = ModelCheckpoint(checkpoint_out)
callbacks = [csv_logger, model_checkpoint]
# train model (fit_generator is deprecated in TF2 Keras — Model.fit accepts generators).
model.fit_generator(train_generator, steps_per_epoch=train_steps,
epochs=epochs, validation_data=validate_generator,
validation_steps=validate_steps, callbacks=callbacks,
workers=threads, class_weight=class_weights)
# report final results on test data
loss_and_metrics = model.evaluate_generator(test_generator, test_steps,