    out_filters.append(prev_filters)
    models.append(model)
elif block['type'] == 'convolutional':
    conv_id = conv_id + 1
    batch_normalize = int(block['batch_normalize'])
    filters = int(block['filters'])
    kernel_size = int(block['size'])
    stride = int(block['stride'])
    is_pad = int(block['pad'])
    pad = (kernel_size - 1) // 2 if is_pad else 0
    activation = block['activation']
    model = nn.Sequential()
    if batch_normalize:
        model.add_module('conv{0}'.format(conv_id),
                         nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
        model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
    else:
        model.add_module('conv{0}'.format(conv_id),
                         nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
    if activation == 'leaky':
        model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
    elif activation == 'relu':
        model.add_module('relu{0}'.format(conv_id), nn.ReLU(inplace=True))
    prev_filters = filters
    out_filters.append(prev_filters)
    models.append(model)
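# A minimal sanity check (not from the original file): build one conv + BN +
# LeakyReLU block the same way the 'convolutional' branch above does and push a
# dummy tensor through it. Assumes only torch / torch.nn.
import torch
import torch.nn as nn

blk = nn.Sequential()
blk.add_module('conv1', nn.Conv2d(3, 32, 3, 1, 1, bias=False))
blk.add_module('bn1', nn.BatchNorm2d(32))
blk.add_module('leaky1', nn.LeakyReLU(0.1, inplace=True))
print(blk(torch.randn(1, 3, 416, 416)).shape)  # torch.Size([1, 32, 416, 416])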
elif block['type'] == 'trans_conv':
    conv_id = conv_id + 1
    batch_normalize = int(block['batch_normalize'])
    filters = int(block['filters'])
    kernel_size = int(block['size'])
    stride = int(block['stride'])
@staticmethod
def forward(ctx, x1, x2, training=True):
    # decorator, signature and the `if training:` guard are inferred; the
    # snippet starts mid-function (random alpha in training, fixed 0.5 at eval)
    if training:
        alpha = torch.cuda.FloatTensor(x1.size(0)).uniform_()
        alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)
    else:
        alpha = 0.5
    return alpha * x1 + (1 - alpha) * x2
@staticmethod
def backward(ctx, grad_output):
    beta = torch.cuda.FloatTensor(grad_output.size(0)).uniform_()
    beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)
    beta = Variable(beta)  # Variable is a no-op wrapper on PyTorch >= 0.4
    return beta * grad_output, (1 - beta) * grad_output, None
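# A self-contained sketch of the same stochastic mixing written against the
# current autograd API (device-agnostic torch.rand instead of
# torch.cuda.FloatTensor(...).uniform_()); the class name here is illustrative,
# since the snippet does not show the original one.
import torch

class ShakeShake(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x1, x2, training=True):
        if training:
            # one mixing coefficient per sample, broadcast over C, H, W
            alpha = torch.rand(x1.size(0), 1, 1, 1, device=x1.device)
        else:
            alpha = 0.5
        return alpha * x1 + (1 - alpha) * x2

    @staticmethod
    def backward(ctx, grad_output):
        # an independent coefficient is drawn for the backward pass
        beta = torch.rand(grad_output.size(0), 1, 1, 1, device=grad_output.device)
        return beta * grad_output, (1 - beta) * grad_output, None

# typical call site inside a residual block:
# out = x + ShakeShake.apply(branch1(x), branch2(x), self.training)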
class Shortcut(nn.Module):
    def __init__(self, in_ch, out_ch, stride):
        super(Shortcut, self).__init__()
        self.stride = stride
        self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1,
                               stride=1, padding=0, bias=False)
        self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1,
                               stride=1, padding=0, bias=False)
        self.bn = nn.BatchNorm2d(out_ch)

    def forward(self, x):
        h = F.relu(x)
        h1 = F.avg_pool2d(h, 1, self.stride)
        h1 = self.conv1(h1)
        # remaining lines were cut off in the snippet; the second branch and the
        # concatenation below follow the standard shake-shake downsampling shortcut
        h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)
        h2 = self.conv2(h2)
        return self.bn(torch.cat((h1, h2), dim=1))
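# Usage sketch for Shortcut (relies on the completed forward above): it halves
# the spatial resolution and doubles the channel count.
sc = Shortcut(in_ch=32, out_ch=64, stride=2)
print(sc(torch.randn(4, 32, 16, 16)).shape)  # torch.Size([4, 64, 8, 8])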
# networks
self.model = Generator(num_channels=self.num_channels, base_filter=64, num_residuals=16)
# weight initialization
self.model.weight_init()
# optimizer
self.optimizer = optim.Adam(self.model.parameters(), lr=self.lr, betas=(0.9, 0.999))
# loss function
if self.gpu_mode:
    self.model.cuda()
    self.MSE_loss = nn.MSELoss().cuda()
else:
    self.MSE_loss = nn.MSELoss()
print('---------- Networks architecture -------------')
utils.print_network(self.model)
print('----------------------------------------------')
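# A single-step training sketch wired to the objects configured above; the
# method name and the lr_imgs / hr_imgs batch tensors are illustrative
# stand-ins, not part of the original code.
def train_step(self, lr_imgs, hr_imgs):
    if self.gpu_mode:
        lr_imgs, hr_imgs = lr_imgs.cuda(), hr_imgs.cuda()
    self.optimizer.zero_grad()
    loss = self.MSE_loss(self.model(lr_imgs), hr_imgs)
    loss.backward()
    self.optimizer.step()
    return loss.item()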
def __init__(self, depth, flatten=True):
    super(ConvNet, self).__init__()
    trunk = []
    for i in range(depth):
        indim = 3 if i == 0 else 64
        outdim = 64
        B = ConvBlock(indim, outdim, pool=(i < 4))  # only pool in the first 4 layers
        trunk.append(B)
    if flatten:
        trunk.append(Flatten())
    self.trunk = nn.Sequential(*trunk)
    self.final_feat_dim = 1600
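# Usage sketch: with the common few-shot setting of 84x84 RGB inputs and
# depth=4 (so every block pools: 84 -> 42 -> 21 -> 10 -> 5), the flattened
# feature is 64 * 5 * 5 = 1600, matching final_feat_dim above. Assumes the
# ConvBlock and Flatten helpers from the same codebase are importable.
net = ConvNet(depth=4)
feats = net(torch.randn(8, 3, 84, 84))  # -> shape (8, 1600)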
def __init__(self):
    super(LeNet, self).__init__()
    self.conv = nn.Sequential(
        nn.Conv2d(1, 6, 5),
        nn.Sigmoid(),
        nn.AvgPool2d(2, 2),
        nn.Conv2d(6, 16, 5),
        nn.Sigmoid(),
        nn.AvgPool2d(2, 2)
    )
    self.fc = nn.Sequential(
        nn.Linear(16 * 4 * 4, 120),
        nn.Sigmoid(),
        nn.Linear(120, 84),
        nn.Sigmoid(),
        nn.Linear(84, 10)
    )
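# Forward sketch and shape check (the forward method is not part of the
# snippet): for a 1x28x28 MNIST image the trace is conv5 -> 6x24x24,
# pool -> 6x12x12, conv5 -> 16x8x8, pool -> 16x4x4, which is exactly the
# 16 * 4 * 4 input expected by self.fc.
def forward(self, x):
    feats = self.conv(x)                       # (N, 16, 4, 4)
    return self.fc(feats.view(x.size(0), -1))  # (N, 10) class scores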
def __init__(self, gpus):
    super(Discriminator, self).__init__()
    self.ngpu = gpus
    self.main = nn.Sequential(
        nn.Linear(nc * opt.imageSize * opt.imageSize, 512),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Linear(512, 256),
        nn.LeakyReLU(0.2, inplace=True),
        nn.Linear(256, 1),
        nn.Sigmoid()
    )
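# Forward sketch (not shown in the snippet): this MLP discriminator expects
# inputs flattened to nc * imageSize * imageSize, and the stored ngpu suggests
# the usual DCGAN-style data_parallel dispatch, which is assumed here.
def forward(self, x):
    x = x.view(x.size(0), -1)  # flatten (N, nc, H, W) -> (N, nc*H*W)
    if x.is_cuda and self.ngpu > 1:
        out = nn.parallel.data_parallel(self.main, x, list(range(self.ngpu)))
    else:
        out = self.main(x)
    return out.view(-1, 1).squeeze(1)  # per-sample probability of being real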
def __init__(
    self, state_dim, action_dim, num_hiddens, num_hidden_layers, num_gaussians
):
    super().__init__()
    self.state_dim = state_dim
    self.action_dim = action_dim
    self.num_hiddens = num_hiddens
    self.num_hidden_layers = num_hidden_layers
    self.num_gaussians = num_gaussians
    # outputs:
    #   1. mu, sigma, and pi for each gaussian
    #   2. non-terminal signal
    #   3. reward
    self.gmm_linear = nn.Linear(
        num_hiddens, (2 * state_dim + 1) * num_gaussians + 2
    )
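# Sketch of how the (2*state_dim + 1)*num_gaussians + 2 outputs of gmm_linear
# are usually unpacked (per-gaussian means and scales, mixture logits, then a
# reward and a non-terminal value); the real forward is not shown, so the
# helper name and layout are assumptions.
def _split_gmm_output(self, out):
    g, s = self.num_gaussians, self.state_dim
    mus = out[:, : g * s].view(-1, g, s)
    sigmas = torch.exp(out[:, g * s : 2 * g * s]).view(-1, g, s)
    log_pi = torch.log_softmax(out[:, 2 * g * s : 2 * g * s + g], dim=-1)
    reward = out[:, -2]
    non_terminal = out[:, -1]
    return mus, sigmas, log_pi, reward, non_terminal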
self.drop = nn.Dropout(config.dropout)
self.n_layer = config.n_layer
self.tgt_len = config.tgt_len
self.mem_len = config.mem_len
self.ext_len = config.ext_len
self.max_klen = config.tgt_len + config.ext_len + config.mem_len
self.attn_type = config.attn_type
if not config.untie_r:
    self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
    self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.layers = nn.ModuleList()
if config.attn_type == 0:  # the default attention
    for i in range(config.n_layer):
        self.layers.append(
            RelPartialLearnableDecoderLayer(
                config.n_head, config.d_model, config.d_head, config.d_inner, config.dropout,
                tgt_len=config.tgt_len, ext_len=config.ext_len, mem_len=config.mem_len,
                dropatt=config.dropatt, pre_lnorm=config.pre_lnorm,
                r_w_bias=None if config.untie_r else self.r_w_bias,
                r_r_bias=None if config.untie_r else self.r_r_bias,
                output_attentions=self.output_attentions,
                layer_norm_epsilon=config.layer_norm_epsilon)
        )
else:  # learnable embeddings and absolute embeddings are not used in our pretrained checkpoints
    raise NotImplementedError  # Removed them to avoid maintaining dead code
self.same_length = config.same_length
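# Note on the bias handling above: when config.untie_r is False, the single
# r_w_bias / r_r_bias parameters created on this module are handed to every
# RelPartialLearnableDecoderLayer, so all layers share one pair of
# relative-position biases; when untie_r is True each layer is passed None and
# is expected to allocate its own pair.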
def __init__(self, nc, nf):
    super(D, self).__init__()
    main = nn.Sequential()
    # 256
    layer_idx = 1
    name = 'layer%d' % layer_idx
    main.add_module('%s.conv' % name, nn.Conv2d(nc, nf, 4, 2, 1, bias=False))
    # 128
    layer_idx += 1
    name = 'layer%d' % layer_idx
    main.add_module(name, blockUNet(nf, nf*2, name, transposed=False, bn=True, relu=False, dropout=False))
    # 64
    layer_idx += 1
    name = 'layer%d' % layer_idx
    nf = nf * 2
    main.add_module(name, blockUNet(nf, nf*2, name, transposed=False, bn=True, relu=False, dropout=False))
    # 32
    layer_idx += 1
    name = 'layer%d' % layer_idx
    nf = nf * 2