How to use the wandb.init function in wandb

To help you get started, we've selected a few examples of wandb.init, drawn from popular ways it is used in public projects.

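Every example below follows the same basic pattern: call wandb.init() once at the top of the script, hang hyperparameters off the returned run's config, and log metrics as training proceeds. Here is a minimal sketch of that pattern (the project name and the train_one_epoch helper are placeholders, not taken from any example below):

import wandb

# start a run; hyperparameters can be passed up front or assigned one at a time afterwards
run = wandb.init(project="my-project", config={"epochs": 10, "lr": 0.01})

for epoch in range(run.config.epochs):
    loss = train_one_epoch(run.config.lr)  # hypothetical training step
    wandb.log({"epoch": epoch, "loss": loss})  # metrics appear live in the wandb UI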

github lukas / ml-class / keras-color / color.py
from keras.layers import Input, Dense, Flatten, Reshape, Conv2D, UpSampling2D, MaxPooling2D
from keras.models import Model, Sequential
from keras.datasets import mnist
from keras.callbacks import Callback
import random
import glob
import wandb
from wandb.keras import WandbCallback
import subprocess
import os

from PIL import Image
import numpy as np

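# initialize a wandb run; attributes set on run.config below are recorded as the run's hyperparameters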
run = wandb.init()
config = run.config

config.num_epochs = 100
config.batch_size = 4
config.img_dir = "images"
config.height = 256
config.width = 256

val_dir = 'dogcat-data/validation/cat'
train_dir = 'dogcat-data/train/cat'
if not os.path.exists("dogcat-data"):
    if os.path.exists("../keras-transfer/dogcat-data"):
        subprocess.check_output("cp -r ../keras-transfer/dogcat-data .", shell=True)
    else:
        print("Downloading dog/cat dataset...")
        subprocess.check_output("curl https://storage.googleapis.com/wandb-production.appspot.com/qualcomm/dogcat-data.tgz | tar xvz", shell=True)

github lukas / ml-class / examples / keras-perf / cnn.py
import os
import tensorflow as tf
from tensorflow.python.client import timeline
import wandb
from wandb.keras import WandbCallback

run = wandb.init()
config = run.config
config.first_layer_convs = 32
config.first_layer_conv_width = 3
config.first_layer_conv_height = 3
config.dropout = 0.2
config.dense_layer_size = 128
config.img_width = 32
config.img_height = 32
config.channels = 3
config.epochs = 4

(X_train, y_train), (X_test, y_test) = tf.keras.datasets.cifar10.load_data()

X_train = X_train.astype('float32')
X_train /= 255.
X_test = X_test.astype('float32')

github lukas / ml-class / examples / keras-mlp / dropout.py
import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout
from keras.utils import np_utils
import json

from wandb.keras import WandbCallback
import wandb

run = wandb.init()
config = run.config
config.optimizer = "adam"
config.epochs = 50
config.dropout = 0.4
config.hidden_nodes = 100

# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_width = X_train.shape[1]
img_height = X_train.shape[2]

X_train = X_train.astype('float32')
X_train /= 255.
X_test = X_test.astype('float32')
X_test /= 255.
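
These Keras snippets are truncated before training starts; the imported WandbCallback is what actually ties Keras to the run. A minimal sketch of the usual continuation, assuming a compiled Keras model named model (not shown in the snippet above):

model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=config.epochs,
          callbacks=[WandbCallback()])  # streams per-epoch metrics to wandb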

github vwxyzjn / cleanrl / cleanrl / dqn2_atari_visual.py
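# fragment from inside a custom gym wrapper's render() method: the matplotlib Q-value
# figure is converted to an RGB array and stitched beside the environment frame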
# Image.fromarray(X)
            rgb_image = np.array(Image.fromarray(X).convert('RGB'))
            plt.close(fig)
            q_value_rgb_array = rgb_image
            return np.append(env_rgb_array, q_value_rgb_array, axis=1)
        else:
            super().render(mode)

# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
        '\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
    import wandb
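    # sync_tensorboard=True mirrors everything written to the SummaryWriter into the wandb run;
    # monitor_gym=True uploads the videos recorded below, and save_code=True snapshots this script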
    wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)
    writer = SummaryWriter(f"/tmp/{experiment_name}")

# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
env = wrap_atari(env)
env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`
if args.capture_video:
    env = QValueVisualizationWrapper(env)
    env = Monitor(env, f'videos/{experiment_name}')
env = wrap_deepmind(
    env,
    clip_rewards=True,
    frame_stack=True,
    scale=False,
)

github catalyst-team / catalyst / catalyst / rl / core / trainer.py
def _init(self, **kwargs):
        global WANDB_ENABLED
        assert len(kwargs) == 0
        if WANDB_ENABLED:
            if self.monitoring_params is not None:
                self.checkpoints_glob: List[str] = \
                    self.monitoring_params.pop(
                        "checkpoints_glob", ["best.pth", "last.pth"])

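                # everything left in monitoring_params is forwarded verbatim as wandb.init keyword arguments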
                wandb.init(**self.monitoring_params)

                logdir_src = Path(self.logdir)
                logdir_dst = Path(wandb.run.dir)

                configs_src = logdir_src.joinpath("configs")
                os.makedirs(f"{logdir_dst}/{configs_src.name}", exist_ok=True)
                shutil.rmtree(f"{logdir_dst}/{configs_src.name}")
                shutil.copytree(
                    f"{str(configs_src.absolute())}",
                    f"{logdir_dst}/{configs_src.name}")

                code_src = logdir_src.joinpath("code")
                if code_src.exists():
                    os.makedirs(f"{logdir_dst}/{code_src.name}", exist_ok=True)
                    shutil.rmtree(f"{logdir_dst}/{code_src.name}")
                    shutil.copytree(
                        f"{str(code_src.absolute())}",
                        f"{logdir_dst}/{code_src.name}")

github lukas / ml-class / keras-fashion / nn.py
import numpy
from keras.datasets import fashion_mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten, Dropout, Reshape, Conv2D, MaxPooling2D
from keras.utils import np_utils
import wandb
from wandb.keras import WandbCallback

# logging code
run = wandb.init()
config = run.config
config.epochs = 100
config.lr = 0.01
config.layers = 3
config.hidden_layer_1_size = 128

# load data
(X_train, y_train), (X_test, y_test) = fashion_mnist.load_data()
print(y_test.shape)
print(y_train.shape)

X_train = X_train / 255.
X_test = X_test / 255.

img_width = X_train.shape[1]
img_height = X_train.shape[2]

github lukas / ml-class / examples / keras-cifar / cifar-cnn.py
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import Adam

import numpy as np
import os
import wandb
from wandb.keras import WandbCallback

wandb.init()

config = wandb.config
config.batch_size = 128
config.epochs = 10
config.learn_rate = 0.001
config.dropout = 0.3
config.dense_layer_nodes = 128

class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(class_names)

(X_train, y_train), (X_test, y_test) = cifar10.load_data()

X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
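
Note that this example assigns hyperparameters to the module-level wandb.config rather than to a run object; after wandb.init() returns, wandb.config and run.config refer to the same configuration.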

github lukas / ml-class / scikit / cross-validation-log.py
import pandas as pd
import numpy as np
from wandblog import log
import wandb
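# job_type='eval' tags the run for filtering in the UI; the config values read further down
# (lowercase, ngram_min, ...) must be supplied externally, e.g. by a sweep or a config-defaults.yaml file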
run = wandb.init(job_type='eval')
config = run.config

df = pd.read_csv('tweets.csv')
target = df['is_there_an_emotion_directed_at_a_brand_or_product']
text = df['tweet_text']

fixed_text = text[pd.notnull(text)]
fixed_target = target[pd.notnull(text)]

from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(lowercase=config.lowercase,
                             ngram_range=(config.ngram_min,
                                          config.ngram_max),
                             token_pattern=config.token_pattern)
count_vect.fit(fixed_text)

github vwxyzjn / gym-microrts / experiments / history / cleanrl_a2c_hrl_kl.py
    parser.add_argument('--end-a', type=float, default=0.8,
                       help="the ending alpha for exploration")
    parser.add_argument('--exploration-fraction', type=float, default=0.8,
                       help="the fraction of `total-timesteps` it takes from start-e to go end-e")
    args = parser.parse_args()
    if not args.seed:
        args.seed = int(time.time())

# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
        '\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
    import wandb
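    # tensorboard=True here is an older alias for sync_tensorboard=True (this snippet predates the rename)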
    wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True)
    writer = SummaryWriter(f"/tmp/{experiment_name}")
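    # wandb.save uploads a copy of this very script, so the exact code is kept with the run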
    wandb.save(os.path.abspath(__file__))

# TRY NOT TO MODIFY: seeding
device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
env = gym.make(args.gym_id)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
input_shape, preprocess_obs_fn = preprocess_obs_space(env.observation_space, device)
output_shape = preprocess_ac_space(env.action_space)
# respect the default timelimit

github vwxyzjn / cleanrl / cleanrl / experiments / ppo_num_steps.py
    parser.add_argument('--clip-coef', type=float, default=0.2,
                       help="the surrogate clipping coefficient")
    parser.add_argument('--update-epochs', type=int, default=4,
                        help="the K epochs to update the policy")
    args = parser.parse_args()
    if not args.seed:
        args.seed = int(time.time())

# TRY NOT TO MODIFY: setup the environment
experiment_name = f"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}"
writer = SummaryWriter(f"runs/{experiment_name}")
writer.add_text('hyperparameters', "|param|value|\n|-|-|\n%s" % (
        '\n'.join([f"|{key}|{value}|" for key, value in vars(args).items()])))
if args.prod_mode:
    import wandb
    wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True)
    writer = SummaryWriter(f"/tmp/{experiment_name}")
    wandb.save(os.path.abspath(__file__))

# TRY NOT TO MODIFY: seeding
device = torch.device('cpu')
env = gym.make(args.gym_id)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
env.seed(args.seed)
env.action_space.seed(args.seed)
env.observation_space.seed(args.seed)
input_shape, preprocess_obs_fn = preprocess_obs_space(env.observation_space, device)
output_shape = preprocess_ac_space(env.action_space)
# respect the default timelimit
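
Taken together, the keyword arguments to wandb.init in these examples fall into three groups: run identity (project, entity, name, job_type), configuration (config=vars(args) up front, or attribute assignment on run.config after a bare wandb.init()), and integrations (sync_tensorboard, monitor_gym, save_code).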