def train(self):
    """Train the agent."""
    # logger
    if self.args.log:
        wandb.init()
        wandb.config.update(self.hyper_params)
        # wandb.watch([self.actor, self.critic], log="parameters")

    score = 0
    i_episode_prev = 0
    loss = [0.0, 0.0, 0.0]
    state = self.env.reset()

    while self.i_episode <= self.args.episode_num:
        for _ in range(self.hyper_params["ROLLOUT_LEN"]):
            if self.args.render and self.i_episode >= self.args.render_after:
                self.env.render()

            action = self.select_action(state)
            next_state, reward, done, _ = self.step(action)
            self.episode_steps += 1
from keras.models import Sequential
from keras.layers import LSTM, TimeDistributed, RepeatVector, Dense
import numpy as np
import wandb
from wandb.keras import WandbCallback

wandb.init()
config = wandb.config


class CharacterTable(object):
    """Given a set of characters:
    + Encode them to a one hot integer representation
    + Decode the one hot integer representation to their character output
    + Decode a vector of probabilities to their character output
    """
    def __init__(self, chars):
        """Initialize character table.

        # Arguments
            chars: Characters that can appear in the input.
        """
        self.chars = sorted(set(chars))
        self.char_indices = dict((c, i) for i, c in enumerate(self.chars))
        self.indices_char = dict((i, c) for i, c in enumerate(self.chars))
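    # The rest of the class is not shown in this snippet. Below is a minimal sketch
    # of the encode/decode methods the docstring describes (an assumption, modeled
    # on the standard Keras addition_rnn example):
    def encode(self, C, num_rows):
        """One-hot encode the string C into an array of shape (num_rows, len(chars))."""
        x = np.zeros((num_rows, len(self.chars)))
        for i, c in enumerate(C):
            x[i, self.char_indices[c]] = 1
        return x

    def decode(self, x, calc_argmax=True):
        """Decode a one-hot matrix (or matrix of probabilities) back into a string."""
        if calc_argmax:
            x = x.argmax(axis=-1)
        return ''.join(self.indices_char[i] for i in x)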
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers import Dropout, Flatten, Dense
from keras.applications.vgg16 import VGG16, preprocess_input
from dogcat_data import generators, get_nb_files
import os
import sys
import wandb
from wandb.keras import WandbCallback
wandb.init()
config = wandb.config
# dimensions of our images.
config.img_width = 224
config.img_height = 224
config.epochs = 50
config.batch_size = 40
top_model_weights_path = 'bottleneck.h5'
train_dir = 'dogcat-data/train'
validation_dir = 'dogcat-data/validation'
nb_train_samples = 1000
nb_validation_samples = 1000
def save_bottlebeck_features():
    if os.path.exists('bottleneck_features_train.npy') and (len(sys.argv) == 1 or sys.argv[1] != "--force"):
        print("Using saved features, pass --force to save new features")
import util
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import text
import wandb
wandb.init()
config = wandb.config
config.vocab_size = 1000
(X_train, y_train), (X_test, y_test) = util.load_imdb()
tokenizer = text.Tokenizer(num_words=config.vocab_size)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_matrix(X_train)
X_test = tokenizer.texts_to_matrix(X_test)
# one hot encode outputs
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
# create model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(2, activation="softmax", input_shape=(1000,)))
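# A minimal sketch of how such a model is typically compiled and trained with wandb
# logging (an assumption; the epoch count and batch size here are illustrative, not
# taken from the snippet above):
from wandb.keras import WandbCallback

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(X_train, y_train,
          epochs=10,
          batch_size=32,
          validation_data=(X_test, y_test),
          callbacks=[WandbCallback()])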
def train(self):
    """Train the agent."""
    # logger
    if self.args.log:
        wandb.init()
        wandb.config.update(self.hyper_params)
        # wandb.watch([self.dqn], log="parameters")

    # pre-training if needed
    self.pretrain()

    max_epsilon, min_epsilon, epsilon_decay = (
        self.hyper_params["MAX_EPSILON"],
        self.hyper_params["MIN_EPSILON"],
        self.hyper_params["EPSILON_DECAY"],
    )

    for self.i_episode in range(1, self.args.episode_num + 1):
        state = self.env.reset()
        self.episode_step = 0
        losses = list()
        done = False
def train():
    run = wandb.init(config=config_defaults)
    shorten = dict(width="w", height="h", activation="a")
    clean = lambda x: '{:0.1f}'.format(x) if isinstance(x, float) else x
    run.name = "run:" + ','.join([
        '{}={}'.format(shorten.get(k), clean(v)) for k, v in dict(run.config).items() if k in shorten])
    run.save()
    conf = dict(wandb.config)
    value = conf.get("width") + conf.get("height")
    wandb.log(dict(mean_loss=value))
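# A minimal sketch of how this train() is typically driven by a sweep (an assumption;
# config_defaults and the parameter names width/height/activation are taken from the
# function above, while the sweep method and value ranges are illustrative):
config_defaults = dict(width=10, height=10, activation="relu")

sweep_config = {
    "method": "grid",
    "parameters": {
        "width": {"values": [10, 20]},
        "height": {"values": [10, 20]},
        "activation": {"values": ["relu", "tanh"]},
    },
}

sweep_id = wandb.sweep(sweep_config, project="sweep-demo")
wandb.agent(sweep_id, function=train)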
from keras.preprocessing import sequence
from keras.preprocessing import text
import amazon
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding, LSTM
from keras.layers import Conv1D, Flatten
import wandb
from wandb.keras import WandbCallback

wandb.init()
config = wandb.config

(train_summary, train_review_text, train_labels), (test_summary, test_review_text, test_labels) = amazon.load_amazon()

config.vocab_size = 1000
config.maxlen = 1000
config.batch_size = 32
config.embedding_dims = 50
config.filters = 250
config.kernel_size = 3
config.hidden_dims = 250
config.epochs = 10
(X_train, y_train), (X_test, y_test) = (train_summary, train_labels), (test_summary, test_labels)
print("Review", X_train[0])
print("Label", y_train[0])
SEED = seed
random.seed(seed)
np.random.seed(seed)
th.manual_seed(seed)

# Wrap envs.Logger.log for live logging
envs.Logger.log = benchmark_log(envs.Logger.log)
envs.Logger._episodes_stats = benchmark_stats(envs.Logger._episodes_stats)

# Train
print('Benchmarks: Started training.')
exec(main_code)

# Update information about the environment
if hasattr(env, 'spec'):
    wandb.config.update(env.spec)

# Compute and log all rewards
if hasattr(env, 'all_rewards'):
    print('Benchmarks: Computing rewards.')
    R = 0
    returns = []
    for i, (reward, done) in enumerate(zip(env.all_rewards, env.all_dones)):
        wandb.log({
            'all_rewards': reward,
            'all_dones': done,
        }, step=i)
        R += reward
        if bool(done):
            # assumed completion (the original snippet is truncated here): log the
            # finished episode's return, record it, and reset the accumulator
            wandb.log({'episode_return': R}, step=i)
            returns.append(R)
            R = 0
def create_categorical_decoder():
    '''
    Create the decoder with an optional class appended to the input.
    '''
    decoder_input = layers.Input(shape=(wandb.config.latent_dim,))
    label_input = layers.Input(shape=(len(wandb.config.labels),))

    if wandb.config.conditional:
        x = layers.concatenate([decoder_input, label_input], axis=-1)
    else:
        x = decoder_input

    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dense(img_size * img_size, activation='sigmoid')(x)
    x = layers.Reshape((img_size, img_size, 1))(x)

    return Model([decoder_input, label_input], x, name='decoder')
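# A minimal usage sketch (an assumption; img_size, layers, and Model come from the
# surrounding script, and the sampled latent code and class label here are purely
# illustrative):
import numpy as np

decoder = create_categorical_decoder()
z = np.random.normal(size=(1, wandb.config.latent_dim))      # sampled latent code
label = np.eye(len(wandb.config.labels))[[0]]                 # one-hot class label
generated = decoder.predict([z, label])                       # -> (1, img_size, img_size, 1)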