How to use the sol.sol_dataset.SolDataset class in SoL

To help you get started, we’ve selected a few SoL examples, based on popular ways it is used in public projects.

Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.

Example from the cwig/start_follow_read repository, file continuous_sol_training.py (view on GitHub):
# Excerpt from continuous_sol_training.py: build the SOL training and
# validation dataloaders, then load the SOL model and loss hyperparameters.
# NOTE(review): the first line below lost its leading indent when this
# excerpt was extracted — in the original file these statements share the
# enclosing function's indentation.
training_set_list = load_file_list(train_config['training_set'])
    # Training set: random rescaling plus random crops for augmentation.
    train_dataset = SolDataset(training_set_list,
                               rescale_range=train_config['sol']['training_rescale_range'],
                               transform=CropTransform(train_config['sol']['crop_params']))

    # num_workers=0 keeps loading in the main process; the project-specific
    # collate lives in the sol_dataset module.
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=train_config['sol']['batch_size'],
                                  shuffle=True, num_workers=0,
                                  collate_fn=sol_dataset.collate)

    # Cap how many batches count as one "epoch" (images_per_epoch / batch_size);
    # DatasetWrapper presumably enforces that cap — confirm against its definition.
    batches_per_epoch = int(train_config['sol']['images_per_epoch']/train_config['sol']['batch_size'])
    train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)

    # Validation set: no augmentation (transform=None), optionally a random
    # subset, evaluated one image at a time in a fixed order.
    test_set_list = load_file_list(train_config['validation_set'])
    test_dataset = SolDataset(test_set_list,
                              rescale_range=train_config['sol']['validation_rescale_range'],
                              random_subset_size=train_config['sol']['validation_subset_size'],
                              transform=None)
    test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=sol_dataset.collate)


    # Loss-weighting hyperparameters for the SOL objective.
    alpha_alignment = train_config['sol']['alpha_alignment']
    alpha_backprop = train_config['sol']['alpha_backprop']

    # Load only the start-of-line finder from the checkpoint; lf/hw stay unused here.
    sol, lf, hw = init_model(config, only_load='sol')

    # NOTE(review): unconditionally selects the CUDA tensor type — this
    # excerpt assumes a GPU is present (the pretraining script guards with
    # torch.cuda.is_available()); confirm against the full file.
    dtype = torch.cuda.FloatTensor

    # Track the best validation loss seen so far and the epoch it occurred at.
    lowest_loss = np.inf
    lowest_loss_i = 0
    epoch = -1
Example from the cwig/start_follow_read repository, file continuous_sol_training.py (view on GitHub):
def training_step(config):
    """Run one timed SOL training interval.

    Reads the 'training' section of *config*, sets up the training and
    validation datasets/dataloaders, and (in the portion of the function
    not shown in this excerpt) presumably trains until the configured
    reset interval elapses — confirm against the full file.

    Args:
        config: parsed configuration dict with a 'training' section.
    """

    train_config = config['training']

    # Wall-clock budget for this training interval, and its start time.
    allowed_training_time = train_config['sol']['reset_interval']
    init_training_time = time.time()

    # Training set: random rescaling plus random crops for augmentation.
    training_set_list = load_file_list(train_config['training_set'])
    train_dataset = SolDataset(training_set_list,
                               rescale_range=train_config['sol']['training_rescale_range'],
                               transform=CropTransform(train_config['sol']['crop_params']))

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=train_config['sol']['batch_size'],
                                  shuffle=True, num_workers=0,
                                  collate_fn=sol_dataset.collate)

    # Cap how many batches count as one "epoch" (images_per_epoch / batch_size).
    batches_per_epoch = int(train_config['sol']['images_per_epoch']/train_config['sol']['batch_size'])
    train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)

    # Validation set: no augmentation, optionally a random subset.
    test_set_list = load_file_list(train_config['validation_set'])
    test_dataset = SolDataset(test_set_list,
                              rescale_range=train_config['sol']['validation_rescale_range'],
                              random_subset_size=train_config['sol']['validation_subset_size'],
                              transform=None)
Example from the cwig/start_follow_read repository, file sol_pretraining.py (view on GitHub):
# Excerpt from sol_pretraining.py: build the pretraining dataloaders and
# construct the StartOfLineFinder network.
training_set_list = load_file_list(pretrain_config['training_set'])
# Training set: random rescaling plus random crops for augmentation.
train_dataset = SolDataset(training_set_list,
                           rescale_range=pretrain_config['sol']['training_rescale_range'],
                           transform=CropTransform(pretrain_config['sol']['crop_params']))

# num_workers=0 keeps loading in the main process.
# NOTE(review): `sol` here must refer to the project package
# (sol.sol_dataset); it is rebound below to a StartOfLineFinder instance —
# confirm the statement order against the full file.
train_dataloader = DataLoader(train_dataset,
                              batch_size=pretrain_config['sol']['batch_size'],
                              shuffle=True, num_workers=0,
                              collate_fn=sol.sol_dataset.collate)

# Cap how many batches count as one "epoch" (images_per_epoch / batch_size).
batches_per_epoch = int(pretrain_config['sol']['images_per_epoch']/pretrain_config['sol']['batch_size'])
train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)

# Validation set: no augmentation, evaluated one image at a time in fixed order.
test_set_list = load_file_list(pretrain_config['validation_set'])
test_dataset = SolDataset(test_set_list,
                          rescale_range=pretrain_config['sol']['validation_rescale_range'],
                          transform=None)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=sol.sol_dataset.collate)


# Build the start-of-line finder from the configured base channel sizes.
base0 = sol_network_config['base0']
base1 = sol_network_config['base1']
sol = StartOfLineFinder(base0, base1)
if torch.cuda.is_available():
    sol.cuda()
    dtype = torch.cuda.FloatTensor
else:
    # FIX: use the function form of print. The statement form
    # `print "..."` is a SyntaxError on Python 3; the parenthesized
    # single-argument form behaves identically on Python 2.
    print("Warning: Not using a GPU, untested")
    dtype = torch.FloatTensor

# Loss-weighting hyperparameter for the alignment term.
alpha_alignment = pretrain_config['sol']['alpha_alignment']
Example from the cwig/start_follow_read repository, file sol_pretraining.py (view on GitHub):
# Excerpt from sol_pretraining.py: load the YAML config from the command
# line and build the pretraining dataloaders.
import json
import yaml
import sys
import os
import math

from utils import transformation_utils, drawing

# The configuration file path is the first CLI argument.
with open(sys.argv[1]) as f:
    # FIX: yaml.load without an explicit Loader can construct arbitrary
    # Python objects from the input and is deprecated since PyYAML 5.1.
    # safe_load parses plain scalars/lists/maps, which is all a config
    # file needs.
    config = yaml.safe_load(f)

sol_network_config = config['network']['sol']
pretrain_config = config['pretraining']

# Training set: random rescaling plus random crops for augmentation.
training_set_list = load_file_list(pretrain_config['training_set'])
train_dataset = SolDataset(training_set_list,
                           rescale_range=pretrain_config['sol']['training_rescale_range'],
                           transform=CropTransform(pretrain_config['sol']['crop_params']))

# NOTE(review): `sol` (the project package providing sol_dataset.collate)
# is not imported in this excerpt — confirm the import exists in the full file.
train_dataloader = DataLoader(train_dataset,
                              batch_size=pretrain_config['sol']['batch_size'],
                              shuffle=True, num_workers=0,
                              collate_fn=sol.sol_dataset.collate)

# Cap how many batches count as one "epoch" (images_per_epoch / batch_size).
batches_per_epoch = int(pretrain_config['sol']['images_per_epoch']/pretrain_config['sol']['batch_size'])
train_dataloader = DatasetWrapper(train_dataloader, batches_per_epoch)

# Validation set: no augmentation, evaluated one image at a time in fixed order.
test_set_list = load_file_list(pretrain_config['validation_set'])
test_dataset = SolDataset(test_set_list,
                          rescale_range=pretrain_config['sol']['validation_rescale_range'],
                          transform=None)
test_dataloader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=sol.sol_dataset.collate)