import pytest

def test_direct_instantiation():
    # Constructing Reader directly is expected to emit a UserWarning.
    with pytest.warns(UserWarning):
        Reader('storage', 'search')
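If the warning text matters, pytest.warns can also check the message via its match argument; a minimal variant of the test above (the match pattern is hypothetical, not the library's actual wording):

def test_direct_instantiation_message():
    # match is treated as a regular expression applied to the warning message.
    with pytest.warns(UserWarning, match=r"deprecated"):
        Reader('storage', 'search')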
import os
import stat
import sys
from getpass import getpass

def init():
    # CONF is expected to be a module-level ConfigParser-style object defined elsewhere.
    conf_file = os.path.expanduser('~/.peeprc')
    if os.path.exists(conf_file):
        return
    email = password = ''
    while True:
        print('Sign in to Google Reader with your')
        try:
            email, password = input('Email: '), getpass('Password: ')
            Reader(email, password)  # raises if the credentials are rejected
        except (EOFError, KeyboardInterrupt):
            sys.exit()
        except Exception:
            print('\nThe username or password you entered is incorrect.\n')
        else:
            CONF.set('credential', 'email', email)
            CONF.set('credential', 'password', password)
            with open(conf_file, 'w') as f:
                CONF.write(f)
            os.chmod(conf_file, stat.S_IREAD | stat.S_IWRITE)
            break
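For context, here is a minimal sketch of reading the stored credentials back out of ~/.peeprc; it assumes the file written above is a standard INI file managed by a ConfigParser-style CONF object (the helper name is illustrative):

import os
from configparser import ConfigParser

def load_credentials(conf_file='~/.peeprc'):
    # Parse the INI file written by init() and return the saved pair.
    parser = ConfigParser()
    parser.read(os.path.expanduser(conf_file))
    return parser.get('credential', 'email'), parser.get('credential', 'password')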
# Placeholders for test-time inputs and pooled fake images.
self.x_test_tfph = tf.placeholder(tf.float32, shape=[None, *self.image_size], name='x_test_tfph')
self.y_test_tfph = tf.placeholder(tf.float32, shape=[None, *self.image_size], name='y_test_tfph')
self.fake_x_tfph = tf.placeholder(tf.float32, shape=[None, *self.image_size], name='fake_x_tfph')
self.fake_y_tfph = tf.placeholder(tf.float32, shape=[None, *self.image_size], name='fake_y_tfph')

# Generators (G: X -> Y, F: Y -> X) and the corresponding discriminators.
self.G_gen = Generator(name='G', ngf=self.ngf, norm=self.norm, image_size=self.image_size,
                       _ops=self._G_gen_train_ops)
self.Dy_dis = Discriminator(name='Dy', ndf=self.ndf, norm=self.norm, _ops=self._Dy_dis_train_ops,
                            use_sigmoid=self.use_sigmoid)
self.F_gen = Generator(name='F', ngf=self.ngf, norm=self.norm, image_size=self.image_size,
                       _ops=self._F_gen_train_ops)
self.Dx_dis = Discriminator(name='Dx', ndf=self.ndf, norm=self.norm, _ops=self._Dx_dis_train_ops,
                            use_sigmoid=self.use_sigmoid)

# Input pipelines for the two image domains.
x_reader = Reader(self.x_path, name='X', image_size=self.image_size, batch_size=self.flags.batch_size)
y_reader = Reader(self.y_path, name='Y', image_size=self.image_size, batch_size=self.flags.batch_size)
self.x_imgs = x_reader.feed()
self.y_imgs = y_reader.feed()

# History pools of previously generated images for discriminator updates.
self.fake_x_pool_obj = utils.ImagePool(pool_size=50)
self.fake_y_pool_obj = utils.ImagePool(pool_size=50)

# Cycle consistency loss
cycle_loss = self.cycle_consistency_loss(self.x_imgs, self.y_imgs)

# X -> Y
self.fake_y_imgs = self.G_gen(self.x_imgs)
self.G_gen_loss = self.generator_loss(self.Dy_dis, self.fake_y_imgs, use_lsgan=self.use_lsgan)
self.G_loss = self.G_gen_loss + cycle_loss
self.Dy_dis_loss = self.discriminator_loss(self.Dy_dis, self.y_imgs, self.fake_y_tfph,
                                           use_lsgan=self.use_lsgan)
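cycle_consistency_loss itself is not shown in this excerpt; a minimal sketch of the usual CycleGAN formulation, assuming the G_gen/F_gen generators above (the lambda weights are illustrative defaults, not values from this repository):

def cycle_consistency_loss(self, x_imgs, y_imgs, lambda1=10.0, lambda2=10.0):
    # L1 reconstruction error after full X -> Y -> X and Y -> X -> Y round trips.
    forward_loss = tf.reduce_mean(tf.abs(self.F_gen(self.G_gen(x_imgs)) - x_imgs))
    backward_loss = tf.reduce_mean(tf.abs(self.G_gen(self.F_gen(y_imgs)) - y_imgs))
    return lambda1 * forward_loss + lambda2 * backward_loss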
def __init__(self, src="uhd", dst="uhd", samp_rate=2e6):
    super(reader_emulate, self).__init__()

    # Signal source: live USRP capture or a pre-recorded wav file.
    if src == "uhd":
        self._src = usrp_src.usrp_src()
        hi_val = 1.05
    else:
        self._src = blocks.wavfile_source(src, False)
        hi_val = 1.05  # 1.1

    self._bin_src = binary_src(samp_rate, encode="miller", idle_bit=1, repeat=[0, 1, 1, 0, 0, 1, 0])
    self._reader = Reader(self._bin_src.set_bits)
    self._back = background.background(False, True, self._reader)
    self._trans = transition_sink.transition_sink(samp_rate, self._back.append, hi_val=hi_val)
    self._connect(self._src, self._trans)

    # Modulate the Miller-encoded bit stream onto a 13.56 MHz carrier.
    freq = 13560000
    A = 0.90
    self._mult = blocks.multiply_vcc(1)
    self._carrier = analog.sig_source_c(samp_rate, analog.GR_COS_WAVE, freq, A, 0)
    self.connect((self._carrier, 0), (self._mult, 0))
    self.connect((self._bin_src, 0), (self._mult, 1))

    if dst == "uhd":
        self._sink = uhd.usrp_sink(
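The carrier multiplication above is plain amplitude modulation of the encoded bit stream. A minimal NumPy sketch of the same idea, independent of GNU Radio (all names and parameter values here are illustrative):

import numpy as np

def modulate(bits, samples_per_bit, carrier_freq, samp_rate, amplitude=1.0):
    # Repeat each bit into a rectangular baseband envelope, then mix it with a cosine carrier.
    baseband = np.repeat(np.asarray(bits, dtype=float), samples_per_bit)
    t = np.arange(baseband.size) / samp_rate
    return amplitude * baseband * np.cos(2 * np.pi * carrier_freq * t)

waveform = modulate([0, 1, 1, 0, 0, 1, 0], samples_per_bit=64, carrier_freq=13.56e6, samp_rate=80e6)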
# Placeholders for pooled fake (x, y) and (y, x) pairs fed to the discriminators.
self.xy_fake_pairs_tfph = tf.placeholder(tf.float32, shape=[None, self.img_size[0], self.img_size[1], 2],
                                         name='xy_fake_pairs_tfph')
self.yx_fake_pairs_tfph = tf.placeholder(tf.float32, shape=[None, self.img_size[0], self.img_size[1], 2],
                                         name='yx_fake_pairs_tfph')

# Generators, discriminators, and a pretrained VGG16 network.
self.G_gen = Generator(name='G', ngf=self.ngf, norm=self.norm, image_size=self.img_size,
                       _ops=self._G_gen_train_ops)
self.Dy_dis = Discriminator(name='Dy', ndf=self.ndf, norm=self.norm, _ops=self._Dy_dis_train_ops,
                            is_lsgan=self.is_lsgan)
self.F_gen = Generator(name='F', ngf=self.ngf, norm=self.norm, image_size=self.img_size,
                       _ops=self._F_gen_train_ops)
self.Dx_dis = Discriminator(name='Dx', ndf=self.ndf, norm=self.norm, _ops=self._Dx_dis_train_ops,
                            is_lsgan=self.is_lsgan)
self.vggModel = VGG16(name='VGG16_Pretrained')

# Single data reader that yields paired images plus the originals before augmentation.
data_reader = Reader(self.data_path, name='data', image_size=self.img_size, batch_size=self.flags.batch_size,
                     is_train=self.flags.is_train)
# self.x_imgs_ori and self.y_imgs_ori are the images before data augmentation
self.x_imgs, self.y_imgs, self.x_imgs_ori, self.y_imgs_ori, self.img_name = data_reader.feed()

self.fake_xy_pool_obj = utils.ImagePool(pool_size=50)
self.fake_yx_pool_obj = utils.ImagePool(pool_size=50)

# Cycle consistency loss
self.cycle_loss = self.cycle_consistency_loss(self.x_imgs, self.y_imgs)

# Concatenate inputs and outputs along the channel axis to form discriminator pairs.
self.fake_y_imgs = self.G_gen(self.x_imgs)
self.xy_real_pairs = tf.concat([self.x_imgs, self.y_imgs], axis=3)
self.xy_fake_pairs = tf.concat([self.x_imgs, self.fake_y_imgs], axis=3)
self.fake_x_imgs = self.F_gen(self.y_imgs)
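Concatenating input and output along the channel axis is the usual way to condition a discriminator on its input. A minimal sketch of an LSGAN-style discriminator loss over such pairs (a hypothetical helper, not this repository's discriminator_loss):

def lsgan_discriminator_loss(dis, real_pairs, fake_pairs):
    # Least-squares GAN objective: push real pairs toward 1 and fake pairs toward 0.
    error_real = tf.reduce_mean(tf.squared_difference(dis(real_pairs), 1.0))
    error_fake = tf.reduce_mean(tf.square(dis(fake_pairs)))
    return 0.5 * (error_real + error_fake)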
def _build_net(self):
    # tfph: tensorflow placeholder
    self.x_test_tfph = tf.placeholder(tf.float32, shape=[None, *self.image_size], name='A_test_tfph')
    self.y_test_tfph = tf.placeholder(tf.float32, shape=[None, *self.image_size], name='B_test_tfph')

    # Generators and discriminators for the two domains.
    self.G_gen = Generator(name='G', ngf=self.ngf, norm=self.norm, output_channel=self.image_size[2],
                           _ops=self._G_gen_train_ops)
    self.Dy_dis = Discriminator(name='Dy', ndf=self.ndf, norm=self.norm, _ops=self._Dy_dis_train_ops)
    self.F_gen = Generator(name='F', ngf=self.ngf, norm=self.norm, output_channel=self.image_size[2],
                           _ops=self._F_gen_train_ops)
    self.Dx_dis = Discriminator(name='Dx', ndf=self.ndf, norm=self.norm, _ops=self._Dx_dis_train_ops)

    # Input pipelines for the two image domains.
    x_reader = Reader(self.x_path, name='X', image_size=self.image_size, batch_size=self.flags.batch_size)
    y_reader = Reader(self.y_path, name='Y', image_size=self.image_size, batch_size=self.flags.batch_size)
    self.x_imgs = x_reader.feed()
    self.y_imgs = y_reader.feed()

    # Cycle consistency loss
    self.cycle_loss = self.cycle_consistency_loss(self.x_imgs, self.y_imgs)

    # X -> Y: adversarial loss plus L2 weight decay on the generator's variables.
    self.fake_y_imgs = self.G_gen(self.x_imgs)
    self.G_gen_loss = self.generator_loss(self.Dy_dis, self.fake_y_imgs)
    self.G_reg = self.flags.weight_decay * tf.reduce_sum(
        [tf.nn.l2_loss(weight) for weight in tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope='G')])
    self.G_loss = self.G_gen_loss + self.cycle_loss + self.G_reg
    self.Dy_dis_loss = self.discriminator_loss(self.Dy_dis, self.y_imgs, self.fake_y_imgs)
    self.Dy_dis_reg = self.flags.weight_decay * tf.reduce_sum(
def _build_net(self):
    self.input_tfph = tf.placeholder(tf.float32, shape=[None, *self.img_size], name='input_tfph')
    self.z_in_tfph = tf.placeholder(tf.float32, shape=[None, self.flags.z_dim], name='latent_variable_tfph')
    self.keep_prob_tfph = tf.placeholder(tf.float32, name='keep_prob_tfph')

    # Data reader
    data_reader = Reader(self.data_path, name='data', image_size=self.img_size, batch_size=self.flags.batch_size,
                         is_train=self.flags.is_train)
    self.x_imgs, self.y_imgs, self.x_imgs_ori, self.y_imgs_ori = data_reader.feed()

    # Encoder and decoder objects
    self.encoder = Encoder(name='encoder', enc_c=self.enc_c, _ops=self.enc_train_ops)
    self.decoder = Decoder(name='decoder', dec_c=self.dec_c, _ops=self.dec_train_ops)

    # Encoding: the encoder predicts the mean and standard deviation of the latent distribution.
    mu_x, sigma_x = self.encoder(self.x_imgs, keep_prob=self.keep_prob_tfph)
    mu_y, sigma_y = self.encoder(self.y_imgs, keep_prob=self.keep_prob_tfph)

    # Sampling by the re-parameterization trick: z = mu + sigma * eps, eps ~ N(0, 1)
    self.z_x = mu_x + sigma_x * tf.random_normal(tf.shape(mu_x), mean=0., stddev=1., dtype=tf.float32)
    self.z_y = mu_y + sigma_y * tf.random_normal(tf.shape(mu_y), mean=0., stddev=1., dtype=tf.float32)

    # Decoding
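The excerpt ends before the decoding and loss terms. For reference, a minimal sketch of the KL-divergence term that normally accompanies this re-parameterized sampling (the epsilon used to stabilize the log is an assumption, not a value from this repository):

def kl_divergence(mu, sigma):
    # KL(N(mu, sigma^2) || N(0, 1)), summed over latent dimensions, averaged over the batch.
    kl_per_sample = 0.5 * tf.reduce_sum(
        tf.square(mu) + tf.square(sigma) - tf.log(tf.square(sigma) + 1e-8) - 1.0, axis=1)
    return tf.reduce_mean(kl_per_sample)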
model_list = ['mscoco-nh512-nw512-mb64-V8843/ss.h5.merge']
models = [Model(model_file=osp.join(SAVE_ROOT, m)) for m in model_list]
valid_set = Reader(batch_size=1, data_split='test', vocab_freq='freq5', stage='val',
                   data_dir=data_dir, feature_file='features_1res.h5', topic_type='pred',
                   topic_file='lda_topics.h5', caption_switch='off', head=0, tail=1000)
bs = BeamSearch(models, beam_size=3, num_cadidates=500, max_length=20)
scores = validate(bs, valid_set)

if task == 'ra':
    from model.ra import Model
    model_list = ['mscoco-ra-nh512-nw512-na512-mb64-V8843/ra.h5.merge']
    models = [Model(model_file=osp.join(SAVE_ROOT, m)) for m in model_list]
    valid_set = Reader(batch_size=1, data_split='test', vocab_freq='freq5', stage='test',
                       data_dir=data_dir, feature_file='features_30res.h5',
                       caption_switch='off', topic_switch='off', head=0, tail=1000)
    bs = BeamSearch(models, beam_size=3, num_cadidates=500, max_length=20)
    scores = validate(bs, valid_set)

if task == 'rass':
    from model.rass import Model
    model_list = ['mscoco-rass-nh512-nw512-na512-mb64-V8843/rass.h5.merge']
    models = [Model(model_file=osp.join(SAVE_ROOT, m)) for m in model_list]
    valid_set = Reader(batch_size=1, data_split='test', vocab_freq='freq5', stage='val',
                       data_dir=data_dir, feature_file='features_30res.h5', topic_type='pred',
                       topic_file='lda_topics.h5', caption_switch='off', head=0, tail=1000)
    bs = BeamSearch(models, beam_size=3, num_cadidates=500, max_length=20)
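The branches above differ only in which Model class they import, the weight file, and a few Reader arguments; a hedged sketch of a table-driven version of the same dispatch (the TASKS mapping, run_task, and the importlib usage are illustrative, not part of this repository):

import importlib

TASKS = {
    'ra':   dict(module='model.ra', weights='mscoco-ra-nh512-nw512-na512-mb64-V8843/ra.h5.merge',
                 reader_kwargs=dict(stage='test', topic_switch='off')),
    'rass': dict(module='model.rass', weights='mscoco-rass-nh512-nw512-na512-mb64-V8843/rass.h5.merge',
                 reader_kwargs=dict(stage='val', topic_type='pred', topic_file='lda_topics.h5')),
}

def run_task(task, data_dir):
    cfg = TASKS[task]
    Model = importlib.import_module(cfg['module']).Model
    models = [Model(model_file=osp.join(SAVE_ROOT, cfg['weights']))]
    valid_set = Reader(batch_size=1, data_split='test', vocab_freq='freq5',
                       data_dir=data_dir, feature_file='features_30res.h5',
                       caption_switch='off', head=0, tail=1000, **cfg['reader_kwargs'])
    bs = BeamSearch(models, beam_size=3, num_cadidates=500, max_length=20)
    return validate(bs, valid_set)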
else:
    tensors.append(input_queue[i])

min_queue_examples = int(self.fold_size * Reader.MIN_QUEUE_FRACTION)

# Generate a batch of images and labels by building up a queue of examples.
input_batches = self._generate_batches(
    tensors, batch_size, min_queue_examples, shuffle=is_train
)

input_dict = {}
for i in range(len(keys)):
    input_dict[keys[i]] = input_batches[i]

# Display the training images in the visualizer
if is_train:
    prefix = Reader.TRAINING_PREFIX
else:
    prefix = Reader.TESTING_PREFIX

# Images are too heavy for the summary; uncomment to check if needed.
# tf.image_summary(prefix, images, max_images=batch_size)
tf.histogram_summary(prefix + '/image_values', input_dict['images'])
tf.histogram_summary(prefix + '/labels', input_dict['labels'])

return input_dict
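_generate_batches is not shown here; in TF1-era input pipelines it is typically a thin wrapper over tf.train.shuffle_batch and tf.train.batch. A minimal sketch under that assumption (the num_threads and capacity choices are illustrative):

def _generate_batches(self, tensors, batch_size, min_queue_examples, shuffle=True, num_threads=4):
    # Keep at least min_queue_examples items in the queue so shuffle_batch mixes examples well.
    capacity = min_queue_examples + 3 * batch_size
    if shuffle:
        return tf.train.shuffle_batch(tensors, batch_size=batch_size, num_threads=num_threads,
                                      capacity=capacity, min_after_dequeue=min_queue_examples)
    return tf.train.batch(tensors, batch_size=batch_size, num_threads=num_threads, capacity=capacity)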