chn = 8
self.first_view = 16 * chn
self.G_linear = SpectralNorm(nn.Linear(20, 4 * 4 * 16 * chn))
self.conv = nn.ModuleList([GBlock(16*chn, 16*chn, n_class=n_class),
                           GBlock(16*chn, 8*chn, n_class=n_class),
                           GBlock(8*chn, 4*chn, n_class=n_class),
                           GBlock(4*chn, 2*chn, n_class=n_class),
                           SelfAttention(2*chn),
                           GBlock(2*chn, 1*chn, n_class=n_class)])
# TODO impl ScaledCrossReplicaBatchNorm
self.ScaledCrossReplicaBN = ScaledCrossReplicaBatchNorm2d(1*chn)
self.colorize = SpectralNorm(nn.Conv2d(1*chn, 3, [3, 3], padding=1))
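For context, a minimal, self-contained sketch of what the head above does to a latent vector: the spectrally normalized linear layer maps it onto a 4x4 feature map with 16*chn channels, which the GBlock stack then upsamples. The sketch uses torch.nn.utils.spectral_norm in place of the custom SpectralNorm wrapper; the batch size and debug width are illustrative assumptions.

import torch
import torch.nn as nn

chn = 8                                            # debug width, as in the snippet above
G_linear = nn.utils.spectral_norm(nn.Linear(20, 4 * 4 * 16 * chn))
z = torch.randn(4, 20)                             # hypothetical batch of 20-dim latent inputs
h = G_linear(z).view(-1, 16 * chn, 4, 4)           # 4x4 seed feature map with first_view channels
print(h.shape)                                     # torch.Size([4, 128, 4, 4])
# the GBlocks in self.conv (upsample=True by default) then grow the map 4 -> 8 -> 16 -> 32 -> 64 -> 128
# before self.colorize maps the final features to 3 RGB channels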
def __init__(self, in_channel, out_channel, kernel_size=[3, 3],
             padding=1, stride=1, n_class=None, bn=True,
             activation=F.relu, upsample=True, downsample=False):
    super().__init__()

    gain = 2 ** 0.5

    self.conv0 = SpectralNorm(nn.Conv2d(in_channel, out_channel,
                                        kernel_size, stride, padding,
                                        bias=True if bn else True))
    self.conv1 = SpectralNorm(nn.Conv2d(out_channel, out_channel,
                                        kernel_size, stride, padding,
                                        bias=True if bn else True))

    self.skip_proj = False
    if in_channel != out_channel or upsample or downsample:
        # 1x1 convolution so the skip connection matches the main branch
        self.conv_sc = SpectralNorm(nn.Conv2d(in_channel, out_channel,
                                              1, 1, 0))
        self.skip_proj = True

    self.upsample = upsample
    self.downsample = downsample
    self.activation = activation
    self.bn = bn
    if bn:
        # class-conditional normalization layers, one per convolution
        self.HyperBN = ConditionalNorm(in_channel, 148)
        self.HyperBN_1 = ConditionalNorm(out_channel, 148)
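Only the constructor appears in this snippet; the block's forward pass is not shown. The following is a self-contained sketch of the residual pattern the attributes above suggest (pre-activation norm/ReLU/conv, optional 2x upsampling, and a 1x1 projection on the skip path). It is an illustration under assumptions, not the repository's actual forward method: plain BatchNorm2d stands in for ConditionalNorm and torch.nn.utils.spectral_norm for the custom SpectralNorm wrapper.

import torch
import torch.nn as nn
import torch.nn.functional as F

class ResUpBlock(nn.Module):
    """Pre-activation residual block with optional 2x upsampling, mirroring the
    GBlock wiring above (plain BatchNorm2d stands in for ConditionalNorm)."""
    def __init__(self, in_channel, out_channel, upsample=True):
        super().__init__()
        self.bn0 = nn.BatchNorm2d(in_channel)
        self.bn1 = nn.BatchNorm2d(out_channel)
        self.conv0 = nn.utils.spectral_norm(nn.Conv2d(in_channel, out_channel, 3, 1, 1))
        self.conv1 = nn.utils.spectral_norm(nn.Conv2d(out_channel, out_channel, 3, 1, 1))
        self.upsample = upsample
        self.skip_proj = in_channel != out_channel or upsample
        if self.skip_proj:
            # 1x1 projection so the skip path matches the main branch
            self.conv_sc = nn.utils.spectral_norm(nn.Conv2d(in_channel, out_channel, 1, 1, 0))

    def forward(self, x):
        out = F.relu(self.bn0(x))
        if self.upsample:
            out = F.interpolate(out, scale_factor=2)
        out = self.conv0(out)
        out = self.conv1(F.relu(self.bn1(out)))
        skip = x
        if self.skip_proj:
            if self.upsample:
                skip = F.interpolate(skip, scale_factor=2)
            skip = self.conv_sc(skip)
        return out + skip

block = ResUpBlock(128, 64)
print(block(torch.randn(2, 128, 8, 8)).shape)      # torch.Size([2, 64, 16, 16])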
def __init__(self, n_class=1000, chn=96, debug=False):
    super().__init__()

    def conv(in_channel, out_channel, downsample=True):
        return GBlock(in_channel, out_channel,
                      bn=False,
                      upsample=False, downsample=downsample)

    gain = 2 ** 0.5

    if debug:
        chn = 8
    self.debug = debug

    self.pre_conv = nn.Sequential(SpectralNorm(nn.Conv2d(3, 1*chn, 3, padding=1)),
                                  nn.ReLU(),
                                  SpectralNorm(nn.Conv2d(1*chn, 1*chn, 3, padding=1)),
                                  nn.AvgPool2d(2))
    self.pre_skip = SpectralNorm(nn.Conv2d(3, 1*chn, 1))

    self.conv = nn.Sequential(conv(1*chn, 1*chn, downsample=True),
                              SelfAttention(1*chn),
                              conv(1*chn, 2*chn, downsample=True),
                              conv(2*chn, 4*chn, downsample=True),
                              conv(4*chn, 8*chn, downsample=True),
                              conv(8*chn, 16*chn, downsample=True),
                              conv(16*chn, 16*chn, downsample=False))

    self.linear = SpectralNorm(nn.Linear(16*chn, 1))

    self.embed = nn.Embedding(n_class, 16*chn)
    self.embed.weight.data.uniform_(-0.1, 0.1)
    self.embed = spectral_norm(self.embed)
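The final linear layer and the class embedding above are the ingredients of a projection discriminator. The forward pass is not part of the snippet, but a typical scoring rule adds the class embedding's projection onto the pooled features to the unconditional score. A small self-contained sketch, with the feature width matching the debug configuration (chn = 8) and torch.nn.utils.spectral_norm standing in for the snippet's wrappers:

import torch
import torch.nn as nn

feat_dim, n_class = 16 * 8, 1000                   # 16*chn with the debug chn = 8
linear = nn.utils.spectral_norm(nn.Linear(feat_dim, 1))
embed = nn.utils.spectral_norm(nn.Embedding(n_class, feat_dim))

h = torch.randn(4, feat_dim)                       # stands in for globally pooled features from self.conv
y = torch.randint(0, n_class, (4,))                # class labels
score = linear(h).squeeze(1) + (embed(y) * h).sum(1)   # unconditional score + class projection term
print(score.shape)                                 # torch.Size([4])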
def __init__(self, batch_size=64, image_size=64, conv_dim=64):
    super(Discriminator, self).__init__()
    self.imsize = image_size
    layer1 = []
    layer2 = []
    layer3 = []
    last = []

    layer1.append(SpectralNorm(nn.Conv2d(3, conv_dim, 4, 2, 1)))
    layer1.append(nn.LeakyReLU(0.1))
    curr_dim = conv_dim

    layer2.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
    layer2.append(nn.LeakyReLU(0.1))
    curr_dim = curr_dim * 2

    layer3.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
    layer3.append(nn.LeakyReLU(0.1))
    curr_dim = curr_dim * 2

    if self.imsize == 64:
        layer4 = []
        layer4.append(SpectralNorm(nn.Conv2d(curr_dim, curr_dim * 2, 4, 2, 1)))
        layer4.append(nn.LeakyReLU(0.1))
        self.l4 = nn.Sequential(*layer4)
        curr_dim = curr_dim * 2

    self.l1 = nn.Sequential(*layer1)
    self.l2 = nn.Sequential(*layer2)
    self.l3 = nn.Sequential(*layer3)
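A quick shape check of the stack built above: every stage is a spectrally normalized stride-2 4x4 convolution followed by LeakyReLU(0.1), so a 64x64 input shrinks to a 4x4 map after l1 through l4. The sketch below mirrors one stage with torch.nn.utils.spectral_norm; the final scoring convolution (built from the `last` list) is not shown in this fragment.

import torch
import torch.nn as nn

def d_stage(in_dim, out_dim):
    # one stage as appended above: spectral-normalized stride-2 4x4 conv + LeakyReLU(0.1)
    return nn.Sequential(nn.utils.spectral_norm(nn.Conv2d(in_dim, out_dim, 4, 2, 1)),
                         nn.LeakyReLU(0.1))

conv_dim = 64
l1 = d_stage(3, conv_dim)
l2 = d_stage(conv_dim, conv_dim * 2)
l3 = d_stage(conv_dim * 2, conv_dim * 4)
l4 = d_stage(conv_dim * 4, conv_dim * 8)

x = torch.randn(2, 3, 64, 64)
h = l4(l3(l2(l1(x))))
print(h.shape)                                     # torch.Size([2, 512, 4, 4])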
repeat_num = int(np.log2(self.imsize)) - 3
mult = 2 ** repeat_num # 8
layer1.append(SpectralNorm(nn.ConvTranspose2d(z_dim, conv_dim * mult, 4)))
layer1.append(nn.BatchNorm2d(conv_dim * mult))
layer1.append(nn.ReLU())
curr_dim = conv_dim * mult
layer2.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
layer2.append(nn.BatchNorm2d(int(curr_dim / 2)))
layer2.append(nn.ReLU())
curr_dim = int(curr_dim / 2)
layer3.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
layer3.append(nn.BatchNorm2d(int(curr_dim / 2)))
layer3.append(nn.ReLU())
if self.imsize == 64:
    layer4 = []
    curr_dim = int(curr_dim / 2)
    layer4.append(SpectralNorm(nn.ConvTranspose2d(curr_dim, int(curr_dim / 2), 4, 2, 1)))
    layer4.append(nn.BatchNorm2d(int(curr_dim / 2)))
    layer4.append(nn.ReLU())
    self.l4 = nn.Sequential(*layer4)
    curr_dim = int(curr_dim / 2)
self.l1 = nn.Sequential(*layer1)
self.l2 = nn.Sequential(*layer2)
self.l3 = nn.Sequential(*layer3)
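This fragment starts mid-constructor (the def header and the layer1/layer2/layer3 declarations are omitted) and mirrors the discriminator above in reverse. As a hedged illustration of the first stage only: the latent vector is viewed as a 1x1 map and the kernel-4 transposed convolution expands it to 4x4; z_dim = 128 here is an assumption, since the fragment does not define it, and torch.nn.utils.spectral_norm stands in for the custom wrapper.

import numpy as np
import torch
import torch.nn as nn

z_dim, conv_dim, image_size = 128, 64, 64          # z_dim is assumed; the fragment does not define it
mult = 2 ** (int(np.log2(image_size)) - 3)         # 8, matching the comment above

l1 = nn.Sequential(nn.utils.spectral_norm(nn.ConvTranspose2d(z_dim, conv_dim * mult, 4)),
                   nn.BatchNorm2d(conv_dim * mult),
                   nn.ReLU())

z = torch.randn(4, z_dim).view(4, z_dim, 1, 1)     # latent vector reshaped to a 1x1 feature map
h = l1(z)
print(h.shape)                                     # torch.Size([4, 512, 4, 4])
# l2, l3 and (for 64x64 images) l4 then halve the channel count and double the spatial
# size at each step: 4 -> 8 -> 16 -> 32, before a final output convolution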