def __init__(self, input_size, n_hidden, n_head, drop_prob=0.1):
"""
A full transformer encoder layer: multi-head self-attention followed by a
position-wise feed-forward network, each wrapped in dropout and LayerNorm.
* input_size [int]: feature size of the query, key, and value inputs
* n_hidden [int]: hidden dimension of the attention weight matrices
* n_head [int]: number of attention heads
"""
super(transformer_layer, self).__init__()
self.attention = multi_head_attention_layer(input_size, n_hidden=n_hidden, n_head=n_head)
self.drop_out = nn.Dropout(drop_prob)
self.norm_1 = nn.LayerNorm(input_size)
self.feed_forward = nn.Sequential(
nn.Linear(input_size, input_size),
nn.ReLU(),
nn.Dropout(drop_prob),
nn.Linear(input_size, input_size),
)
self.norm_2 = nn.LayerNorm(input_size)
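# The __init__ above only registers sub-modules. A minimal forward sketch,
# assuming the standard post-norm residual wiring (attention -> dropout ->
# add & norm -> feed-forward -> dropout -> add & norm); the call signature of
# multi_head_attention_layer is an assumption here:
def forward(self, x, mask=None):
    attn_out = self.attention(x, x, x, mask)       # self-attention: q = k = v = x (assumed signature)
    x = self.norm_1(x + self.drop_out(attn_out))   # residual connection + LayerNorm
    ff_out = self.feed_forward(x)                  # position-wise feed-forward
    return self.norm_2(x + self.drop_out(ff_out))  # second residual + LayerNorm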
def mnist_model():
model = nn.Sequential(
nn.Conv2d(1, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32 * 7 * 7, 100),  # 28 -> 14 -> 7 after two stride-2 convs
nn.ReLU(),
nn.Linear(100, 10)
)
return model
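# Quick shape check (a sketch, assuming the usual 1x28x28 MNIST input and that
# the Flatten module defined elsewhere reshapes to (B, -1)): each 4x4, stride-2,
# padding-1 convolution halves the spatial size, 28 -> 14 -> 7, which is where
# the 32 * 7 * 7 input size of the first linear layer comes from.
import torch
out = mnist_model()(torch.zeros(1, 1, 28, 28))
print(out.shape)  # torch.Size([1, 10])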
self.inception_4a_relu_3x3 = nn.ReLU(inplace=True)
self.inception_4a_double_3x3_reduce = nn.Conv2d(
576, 96, kernel_size=(1, 1), stride=(1, 1)
)
self.inception_4a_double_3x3_reduce_bn = nn.BatchNorm2d(
96, eps=1e-05, momentum=0.9, affine=True
)
self.inception_4a_relu_double_3x3_reduce = nn.ReLU(inplace=True)
self.inception_4a_double_3x3_1 = nn.Conv2d(
96, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
)
self.inception_4a_double_3x3_1_bn = nn.BatchNorm2d(
128, eps=1e-05, momentum=0.9, affine=True
)
self.inception_4a_relu_double_3x3_1 = nn.ReLU(inplace=True)
self.inception_4a_double_3x3_2 = nn.Conv2d(
128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
)
self.inception_4a_double_3x3_2_bn = nn.BatchNorm2d(
128, eps=1e-05, momentum=0.9, affine=True
)
self.inception_4a_relu_double_3x3_2 = nn.ReLU(inplace=True)
self.inception_4a_pool = nn.AvgPool2d(
3, stride=1, padding=1, ceil_mode=True, count_include_pad=True
)
self.inception_4a_pool_proj = nn.Conv2d(
576, 128, kernel_size=(1, 1), stride=(1, 1)
)
self.inception_4a_pool_proj_bn = nn.BatchNorm2d(
128, eps=1e-05, momentum=0.9, affine=True
)
# Compute the flattened conv feature size by pushing a dummy input through
# the conv stack, so the first fully connected layer can be sized automatically.
# See https://stackoverflow.com/questions/53784998/how-are-the-pytorch-dimensions-for-linear-layers-calculated
input_shape = (1, 1, 36, 60)
with torch.no_grad():
    self.feature_size = self._forward_conv(
        torch.zeros(*input_shape)).view(-1).size(0)
# Fully connected layer: feature_size -> 500
self.fc500 = nn.Linear(self.feature_size, 500)
self.relu = nn.ReLU()
# Final layer: 502 -> 2. The extra 2 inputs (502 = 500 + 2) are presumably
# additional features concatenated to the FC output in forward().
self.fc2 = nn.Linear(502, 2)
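# The dummy-forward trick above is the standard way to size the first fully
# connected layer without doing the convolution arithmetic by hand. A
# self-contained sketch of the same idea (the conv stack here is illustrative,
# not the model above):
import torch
import torch.nn as nn

conv = nn.Sequential(
    nn.Conv2d(1, 50, kernel_size=3, stride=2, padding=1),
    nn.ReLU(),
)
with torch.no_grad():
    # Push a zero tensor of the real input shape through the conv stack and
    # count the flattened features; no gradients are needed for sizing.
    n_features = conv(torch.zeros(1, 1, 36, 60)).view(-1).size(0)
fc = nn.Linear(n_features, 500)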
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
m = OrderedDict()
m['conv1'] = nn.Conv1d(inplanes, planes, kernel_size=1, bias=False)
m['bn1'] = nn.BatchNorm1d(planes)
m['relu1'] = nn.ReLU(inplace=True)
m['conv2'] = nn.Conv1d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
m['bn2'] = nn.BatchNorm1d(planes)
m['relu2'] = nn.ReLU(inplace=True)
m['conv3'] = nn.Conv1d(planes, planes * 4, kernel_size=1, bias=False)
m['bn3'] = nn.BatchNorm1d(planes * 4)
self.group1 = nn.Sequential(m)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
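# The residual wiring of this bottleneck is not shown. A minimal forward
# sketch, assuming the standard ResNet pattern where the final ReLU is applied
# after the skip connection:
def forward(self, x):
    out = self.group1(x)                # conv1 -> bn1 -> relu -> conv2 -> bn2 -> relu -> conv3 -> bn3
    residual = x if self.downsample is None else self.downsample(x)
    return self.relu(out + residual)    # add the skip connection, then the final ReLU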
def CORnet_S():
model = nn.Sequential(OrderedDict([
('V1', nn.Sequential(OrderedDict([ # this one is custom to save GPU memory
('conv1', nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)),
('norm1', nn.BatchNorm2d(64)),
('nonlin1', nn.ReLU(inplace=True)),
('pool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
('conv2', nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1,
bias=False)),
('norm2', nn.BatchNorm2d(64)),
('nonlin2', nn.ReLU(inplace=True)),
('output', Identity())
]))),
('V2', CORblock_S(64, 128, times=2)),
('V4', CORblock_S(128, 256, times=4)),
('IT', CORblock_S(256, 512, times=2)),
('decoder', nn.Sequential(OrderedDict([
('avgpool', nn.AdaptiveAvgPool2d(1)),
('flatten', Flatten()),
('linear', nn.Linear(512, 1000)),
('output', Identity())
        ])))
    ]))  # close the outer OrderedDict and Sequential
    return model
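# A minimal usage sketch; assumes CORblock_S, Flatten, and Identity are defined
# elsewhere in this repo (as the snippet implies) and the usual 224x224
# ImageNet-sized input:
#     model = CORnet_S()
#     logits = model(torch.zeros(1, 3, 224, 224))  # -> shape (1, 1000)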
def network(args):
convLayers = [
nn.Conv2d(args.channel_num, 32, 3, 1, 1, bias=False), # Bxchx96x96 -> Bx32x96x96
nn.BatchNorm2d(32)]
if args.use_ReLU:
convLayers += [nn.ReLU(True)]
elif args.use_ELU:
convLayers += [nn.ELU()]
convLayers += [
nn.Conv2d(32, 64, 3, 1, 1, bias=False), # Bx32x96x96 -> Bx64x96x96
nn.BatchNorm2d(64)]
if args.use_ReLU:
convLayers += [nn.ReLU(True)]
elif args.use_ELU:
convLayers += [nn.ELU()]
for i in range(1,5):
if args.use_strided:
convLayers += [
nn.ZeroPad2d((0, 1, 0, 1)), # Bx64*ix96x96 -> Bx64*ix97x97
nn.Conv2d(64*i, 64*i, 3, 2, 0, bias=False), # Bx(64*i)x97x97 -> Bx(64*i)x48x48 (sizes shown for i=1)
nn.BatchNorm2d(64*i)]
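# The snippet cuts off inside the downsampling loop. A hedged usage sketch,
# assuming network() eventually wraps convLayers in nn.Sequential and returns
# it, with an illustrative args object whose names mirror those read above:
#     from argparse import Namespace
#     args = Namespace(channel_num=3, use_ReLU=True, use_ELU=False, use_strided=True)
#     net = network(args)  # e.g. nn.Sequential(*convLayers)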
self.inception_5b_relu_double_3x3_reduce = nn.ReLU(inplace=True)
self.inception_5b_double_3x3_1 = nn.Conv2d(
192, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
)
self.inception_5b_double_3x3_1_bn = nn.BatchNorm2d(
224, eps=1e-05, momentum=0.9, affine=True
)
self.inception_5b_relu_double_3x3_1 = nn.ReLU(inplace=True)
self.inception_5b_double_3x3_2 = nn.Conv2d(
224, 224, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)
)
self.inception_5b_double_3x3_2_bn = nn.BatchNorm2d(
224, eps=1e-05, momentum=0.9, affine=True
)
self.inception_5b_relu_double_3x3_2 = nn.ReLU(inplace=True)
self.inception_5b_pool = nn.MaxPool2d(
(3, 3), stride=(1, 1), padding=(1, 1), dilation=(1, 1), ceil_mode=True
)
self.inception_5b_pool_proj = nn.Conv2d(
1024, 128, kernel_size=(1, 1), stride=(1, 1)
)
self.inception_5b_pool_proj_bn = nn.BatchNorm2d(
128, eps=1e-05, momentum=0.9, affine=True
)
self.inception_5b_relu_pool_proj = nn.ReLU(inplace=True)
self.global_pool = nn.AvgPool2d(
7, stride=1, padding=0, ceil_mode=True, count_include_pad=True
)
self.fc = nn.Linear(1024, num_classes)
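# In the forward pass of an inception block, each branch's conv -> BN -> ReLU
# chain runs on the same input and the branch outputs are concatenated along
# the channel dimension; a generic sketch (the branch modules are illustrative):
#     out = torch.cat([branch1(x), branch2(x), branch3(x), branch4(x)], dim=1)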
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
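# The forward pass of double_conv is just the sequential stack; a one-line
# sketch matching the usual U-Net building-block pattern:
def forward(self, x):
    return self.conv(x)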
super(Net_SHG, self).__init__()
if blockstr == 'ConvBlock':
block = ConvBlock
elif blockstr == 'BasicBlock':
block = BasicBlock
elif blockstr == 'BottleNeck':
block = BottleNeck
elif blockstr == 'BottleneckPreact':
    block = BottleneckPreact
else:
    raise ValueError('unknown block type: {}'.format(blockstr))
print('Initializing {} hourglasses with {} blocks'.format(nStack, blockstr))
# use integer division so channel counts stay ints under Python 3
self.conv1 = nn.Conv2d(3, nFeat // 4, kernel_size=7, stride=2, padding=3,
                       bias=False)  # 128
self.bn1 = nn.BatchNorm2d(nFeat // 4)
self.relu = nn.ReLU(inplace=True)
self.r1 = block(nFeat // 4, nFeat // 2)
self.pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
self.r4 = block(nFeat // 2, nFeat // 2)
self.r5 = block(nFeat // 2, nFeat)
self.up4 = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)
hg = []
convout = []
for i in range(0, nStack):
layers = []
layers.append(HourGlass(nHGscales, block, nFeat, nModules))
for j in range(0, nModules):
layers.append(self._make_layer(block, nFeat, nModules))