def trec(x):
    return x*(x > 0.0)
nonlinearity = trec

conv1 = cc_layers.Conv2DNoBiasLayer(
    input,
    n_filters=64,
    filter_size=5,
    weights_std=winit1,
    nonlinearity=nonlinearity,
    pad=2)
pool1 = cc_layers.Pooling2DLayer(conv1, 2, stride=2)
unpool2 = cc_layers.Unpooling2DLayer(pool1, pool1)
output = cc_layers.Deconv2DNoBiasLayer(
    unpool2, conv1, nonlinearity=layers.identity)
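
# Aside: trec above is an elementwise truncated rectifier (ReLU): negative
# entries are zeroed and positive entries pass through unchanged.  A quick
# standalone check, reusing the trec defined just above (only numpy needed):
import numpy
print trec(numpy.array([-2.0, -0.5, 0.0, 0.7, 3.0]))
# -> array of [0., 0., 0., 0.7, 3.]
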
class CAELayer2Model(anna.models.UnsupervisedModel):
    batch = 128
    input = cc_layers.Input2DLayer(batch, 3, 96, 96)
    k = float(numpy.random.rand()*1+0.2)
    print '## k = %.3f' % k
    winit1 = k/numpy.sqrt(5*5*3)
    winit2 = k/numpy.sqrt(5*5*64)
    binit = 0.0

    def trec(x):
        return x*(x > 0.0)
    nonlinearity = trec
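
# Aside: the winit* scales above follow a 1/sqrt(fan_in) rule, where
# fan_in = filter_height * filter_width * input_channels and k is drawn
# uniformly from [0.2, 1.2) by the line above.  Concretely:
#   conv1: fan_in = 5*5*3  = 75   -> winit1 = k / sqrt(75)   ~= k / 8.66
#   conv2: fan_in = 5*5*64 = 1600 -> winit2 = k / sqrt(1600)  = k / 40.0
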
    filter_size=5,
    weights_std=winit3,
    nonlinearity=nonlinearity,
    pad=2)
pool3 = cc_layers.CudaConvnetPooling2DLayer(conv3, 12, stride=12)

winitD1 = k/numpy.sqrt(numpy.prod(pool3.get_output_shape()))
winitD2 = k/numpy.sqrt(512)

pool3_shuffle = cc_layers.ShuffleC01BToBC01Layer(pool3)
fc4 = layers.DenseLayer(
    pool3_shuffle,
    n_outputs=512,
    weights_std=winitD1,
    init_bias_value=1.0,
    nonlinearity=layers.rectify,
    dropout=0.5)
output = layers.DenseLayer(
    fc4,
    n_outputs=10,
    weights_std=winitD2,
    init_bias_value=0.0,
    nonlinearity=layers.softmax)
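
# Aside: the final DenseLayer applies layers.softmax over 10 outputs, so each
# row of the (batch x 10) output is a probability distribution over classes.
# A minimal NumPy sketch of that normalisation (illustration only, not the
# library's implementation):
import numpy
logits = numpy.random.randn(4, 10)                        # 4 examples, 10 classes
probs = numpy.exp(logits - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)
print probs.sum(axis=1)                                   # -> four values ~1.0
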
def insert_mask_layer(model, model_layer):
    # Locate the layer that currently takes model_layer as its input.
    all_layers = layers.all_layers(model.output)
    all_layers = all_layers[0:-1]
    next_layer = [layer for layer in all_layers
                  if layer.input_layer == model_layer][0]
    # Splice a MaxMaskLayer between model_layer and that consumer, then
    # recompile the model so the new wiring takes effect.
    model.max_mask_layer = MaxMaskLayer(model_layer)
    next_layer.input_layer = model.max_mask_layer
    model._compile()
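
# Usage sketch for insert_mask_layer (hypothetical: `model` is assumed to be
# an already-constructed model object and `model.conv1` one of its layers;
# neither name is confirmed by this snippet):
#
#   insert_mask_layer(model, model.conv1)
#
# Afterwards, the layer that previously read conv1's output reads from
# model.max_mask_layer instead, and model._compile() has rebuilt the
# compiled functions to reflect the new wiring.
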
    weights_std=winit2,
    nonlinearity=nonlinearity,
    pad=2)
pool2 = cc_layers.Pooling2DLayer(conv2, 2, stride=2)
conv3 = cc_layers.Conv2DNoBiasLayer(
    pool2,
    n_filters=256,
    filter_size=5,
    weights_std=winit3,
    nonlinearity=nonlinearity,
    pad=2)
deconv3 = cc_layers.Deconv2DNoBiasLayer(
    conv3, conv3, nonlinearity=layers.identity)
unpool4 = cc_layers.Unpooling2DLayer(deconv3, pool2)
deconv4 = cc_layers.Deconv2DNoBiasLayer(
    unpool4, conv2, nonlinearity=layers.identity)
unpool5 = cc_layers.Unpooling2DLayer(deconv4, pool1)
output = cc_layers.Deconv2DNoBiasLayer(
    unpool5, conv1, nonlinearity=layers.identity)
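
# Shape check for the encoder/decoder stack above, assuming the 5x5, pad=2,
# stride-1 convolutions are size-preserving and each 2x2 pool with stride 2
# halves the spatial dimensions (96x96 input, as in the model classes in
# this file):
#   96x96 -> conv1 -> 96x96 -> pool1 -> 48x48 -> conv2 -> 48x48 -> pool2
#   -> 24x24 -> conv3 -> 24x24 -> deconv3 -> 24x24 -> unpool4 -> 48x48
#   -> deconv4 -> 48x48 -> unpool5 -> 96x96 -> output deconv -> 96x96,
# i.e. the decoder mirrors the encoder back to the input resolution.
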
class SupervisedModel(anna.models.SupervisedModel):
    batch = 128
    input = cc_layers.Input2DLayer(batch, 3, 96, 96)
    k = float(numpy.random.rand()*1+0.2)
    print '## k = %.3f' % k
    winit1 = k/numpy.sqrt(5*5*3)
    winit2 = k/numpy.sqrt(5*5*64)
    winit3 = k/numpy.sqrt(5*5*128)
    binit = 0.0
    weights_std=winit2,
    nonlinearity=nonlinearity,
    pad=2)
pool2 = cc_layers.CudaConvnetPooling2DLayer(conv2, 2, stride=2)
conv3 = cc_layers.CudaConvnetConv2DNoBiasLayer(
    pool2,
    n_filters=256,
    filter_size=5,
    weights_std=winit3,
    nonlinearity=nonlinearity,
    pad=2)
deconv3 = cc_layers.CudaConvnetDeconv2DNoBiasLayer(
    conv3, conv3, nonlinearity=layers.identity)
unpool4 = cc_layers.CudaConvnetUnpooling2DLayer(deconv3, pool2)
deconv4 = cc_layers.CudaConvnetDeconv2DNoBiasLayer(
    unpool4, conv2, nonlinearity=layers.identity)
unpool5 = cc_layers.CudaConvnetUnpooling2DLayer(deconv4, pool1)
output = cc_layers.CudaConvnetDeconv2DNoBiasLayer(
    unpool5, conv1, nonlinearity=layers.identity)
class SupervisedModel(fastor.models.SupervisedModel):
    batch = 128
    input = cc_layers.CudaConvnetInput2DLayer(batch, 3, 96, 96)
    k = float(numpy.random.rand()*1+0.2)
    print '## k = %.3f' % k
    winit1 = k/numpy.sqrt(5*5*3)  # was = 0.25
    winit2 = k/numpy.sqrt(5*5*64)
    winit3 = k/numpy.sqrt(5*5*128)
    binit = 0.0
    n_filters=192,
    filter_size=3,
    weights_std=winit3,
    nonlinearity=nonlinearity,
    pad=1)

winitD1 = k/numpy.sqrt(numpy.prod(conv3.get_output_shape()))
winitD2 = k/numpy.sqrt(300)

conv3_shuffle = cc_layers.ShuffleC01BToBC01Layer(conv3)
fc4 = layers.DenseLayer(
    conv3_shuffle,
    n_outputs=300,
    weights_std=winitD1,
    init_bias_value=1.0,
    nonlinearity=layers.rectify,
    dropout=0.0)
output = layers.DenseLayer(
    fc4,
    n_outputs=10,
    weights_std=winitD2,
    init_bias_value=0.0,
    nonlinearity=layers.softmax)