filelist = tspl_utils.getfilelist(n.filearg)
procs = min(len(filelist), n.p[0])
output = n.o[0]

# bail out before touching filelist[0] if no input files were found
if procs < 1:
    print 'Must have at least one file'
    exit(1)

job = pickle.load(open(filelist[0]))
jid = job.id
epoch = job.end_time

ld = lariat_utils.LariatData()
ld.set_job(jid, end_epoch=epoch, directory=analysis_conf.lariat_path)

pool = multiprocessing.Pool(processes=procs)

# bind the fixed keyword arguments so pool.map only has to pass each filename
partial_work = functools.partial(do_work, mintime=3600., wayness=16,
                                 lariat_dict=ld)
results = pool.map(partial_work, filelist)
print len(results)

# accumulate SUs per executable name, skipping jobs with no stall data
sus = {}
for (f_stall, mem_rate, cpi, ename, jid, user, su) in results:
    if f_stall is None:
        continue
    if ename in sus:
        sus[ename] += su
    else:
        sus[ename] = su
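
# A minimal, self-contained sketch of the pattern above: functools.partial
# freezes the keyword arguments so Pool.map only has to pass each filename.
# The names below (process_file, sample_paths) are illustrative stand-ins for
# do_work() and the real file list, not part of the code above.
import functools
import multiprocessing

def process_file(path, mintime=3600.0, wayness=16):
    # stand-in for do_work(): return something derived from the inputs
    return (path, mintime, wayness)

if __name__ == "__main__":
    sample_paths = ["a.dat", "b.dat", "c.dat"]
    procs = min(len(sample_paths), multiprocessing.cpu_count())
    worker = functools.partial(process_file, mintime=1800.0, wayness=8)
    pool = multiprocessing.Pool(processes=procs)
    try:
        results = pool.map(worker, sample_paths)
    finally:
        pool.close()
        pool.join()
    print(len(results))
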
os.makedirs(output_dir)
# At this stage we have: the root_process_pid, traces, and a mapping
# of pid -> trace file path
# TODO: create sub-dirs based on the number of traces to have at most a
# few 100 to 1000 files per dir
# queues of things todo and things done
manager = multiprocessing.Manager()
todo = manager.Queue()
done = manager.Queue()
# use a rather high number of processes/threads: x times the number of CPU
# cores
if parallel:
    pool = multiprocessing.Pool(multiprocessing.cpu_count() * 2)
# we start from the root pid
root_proc = Process(pid=root_pid, ppid=None, cwd=cwd, init_exec=None)
todo.put(root_proc)
# track if some process traces are unrelated to the process tree
has_orphans = False
traces_len = len(traces)
timeout = timeout_func(traces_len)
for i in range(traces_len):
    # if we have orphaned traces, we will time out eventually
    try:
        proc = todo.get(block=True, timeout=timeout)
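
# A minimal, self-contained sketch of the queue-driven pattern above: Manager
# queues are picklable proxies, so "todo"/"done" can be handed to pool workers,
# and a timeout on get() keeps the consumer from hanging when no more work
# arrives. handle_item and the integer items are illustrative stand-ins.
import multiprocessing
import queue

def handle_item(item, todo, done):
    # stand-in for real per-item work; follow-up work could be put on "todo"
    done.put(item * 2)

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    todo = manager.Queue()
    done = manager.Queue()
    for item in range(5):
        todo.put(item)
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count() * 2)
    for _ in range(5):
        try:
            item = todo.get(block=True, timeout=5)
        except queue.Empty:
            break  # nothing left (or orphaned work): stop waiting
        pool.apply_async(handle_item, (item, todo, done))
    pool.close()
    pool.join()
    results = []
    while not done.empty():
        results.append(done.get())
    print(sorted(results))
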
dump_dict = loadh5(dump_file)
kp[i] = dump_dict["kp"]
z[i] = dump_dict["z"]
desc[i] = dump_dict["desc"]
print("")
# Create arguments
pool_arg = []
idx = 0
for ii, jj in cur_pairs:
    idx += 1
    pool_arg += [(dump_dir, idx, ii, jj)]
# Run mp job
ratio_CPU = 0.8
number_of_process = int(ratio_CPU * mp.cpu_count())
pool = mp.Pool(processes=number_of_process)
manager = mp.Manager()
queue = manager.Queue()
for idx_arg in xrange(len(pool_arg)):
    pool_arg[idx_arg] = pool_arg[idx_arg] + (queue,)
# map async
pool_res = pool.map_async(dump_data_pair, pool_arg)
# monitor loop
while True:
    if pool_res.ready():
        break
    else:
        size = queue.qsize()
        print("\rDistMat {} / {}".format(size, len(pool_arg)), end="")
        sys.stdout.flush()
        time.sleep(1)
pool.close()
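
# A minimal, self-contained sketch of the progress-monitoring pattern above:
# map_async() returns immediately, each worker reports completion on a shared
# Manager queue, and the parent polls qsize() until the result is ready.
# slow_square and n_tasks are illustrative names, not from the code above.
import multiprocessing as mp
import sys
import time

def slow_square(args):
    value, progress_queue = args
    time.sleep(0.1)            # stand-in for real work
    progress_queue.put(value)  # report completion
    return value * value

if __name__ == "__main__":
    n_tasks = 20
    manager = mp.Manager()
    progress_queue = manager.Queue()
    pool = mp.Pool(processes=max(1, int(0.8 * mp.cpu_count())))
    async_result = pool.map_async(slow_square,
                                  [(i, progress_queue) for i in range(n_tasks)])
    while not async_result.ready():
        print("\rprogress {} / {}".format(progress_queue.qsize(), n_tasks), end="")
        sys.stdout.flush()
        time.sleep(0.5)
    print()
    pool.close()
    pool.join()
    print(async_result.get()[:5])
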
def determine_dice_scores():
    dice_scores = []
    # Let's multiprocess
    pool = Pool(processes=4)
    # Take subset
    print "N images", len(test_images)

kernel7 = np.array([[[0,0,1,1,1,0,0],
                     [0,1,1,1,1,1,0],
                     [1,1,1,1,1,1,1],
                     [1,1,1,1,1,1,1],
                     [1,1,1,1,1,1,1],
                     [0,1,1,1,1,1,0],
                     [0,0,1,1,1,0,0]]])
kernel9 = np.array([[[0,0,0,1,1,1,0,0,0],
                     [0,1,1,1,1,1,1,1,0],

def parallelize(function, pool_input):
    global args;
    if len(pool_input) > 0:
        threads = min([len(pool_input),args.threads]);
        if args.threads > 1:
            pool = multiprocessing.Pool(processes=threads);
            pool_output = pool.map(function, pool_input);
            pool.close() # no more tasks
            pool.join() # wrap up current tasks
        else:
            pool_output = [];
            for input in pool_input:
                pool_output.append(function(input));
    else:
        pool_output = [];
    return(pool_output);

def pmap(func, mlist, n_jobs):
    if n_jobs != 1:
        pool = Pool(n_jobs if n_jobs != -1 else None)
        mmap = pool.map
    else:
        mmap = map
    return mmap(func, mlist)
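
# A self-contained usage sketch for the conditional wrappers above
# (parallelize() / pmap()): the same call site works serially or with a pool.
# pmap_strict is an illustrative variant, not the original: it also
# materializes the result (Python 3's map() is lazy) and releases the pool.
from multiprocessing import Pool

def square(x):
    return x * x

def pmap_strict(func, mlist, n_jobs):
    # n_jobs == 1 -> serial; n_jobs == -1 -> Pool(None), one worker per core
    if n_jobs != 1:
        pool = Pool(n_jobs if n_jobs != -1 else None)
        try:
            return pool.map(func, mlist)
        finally:
            pool.close()
            pool.join()
    return list(map(func, mlist))

if __name__ == "__main__":
    print(pmap_strict(square, range(10), n_jobs=1))   # serial fallback
    print(pmap_strict(square, range(10), n_jobs=-1))  # parallel
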
param_space = [p for p in itertools.combinations(np.arange(0, 2.01, 0.01), self.feature_num)]
print('generating parameter list done', file=sys.stderr)
global _examples
_examples = examples
global _decode_results
_decode_results = decode_results
global _evaluator
_evaluator = evaluator
global _ranker
_ranker = self
def _norm(_param):
    return sum(p ** 2 for p in _param)
with multiprocessing.Pool(processes=num_workers) as pool:
    # segment the parameter space
    segment_size = int(len(param_space) / num_workers / 5)
    param_space_segments = []
    ptr = 0
    while ptr < len(param_space):
        param_space_segments.append(param_space[ptr: ptr + segment_size])
        ptr += segment_size
    print('generated %d parameter segments' % len(param_space_segments), file=sys.stderr)

    results = pool.imap_unordered(_rank_segment_worker, param_space_segments)
    for param, score in results:
        if score > best_score or (score == best_score and _norm(param) < _norm(best_param)):
            print('[Main] New param=%s, score=%.4f' % (param, score), file=sys.stderr)
            best_param = param
            best_score = score
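
# A minimal, self-contained sketch of the pattern above: large read-only state
# is exposed to workers through module-level globals, and results stream back
# via imap_unordered. The snippet above relies on fork() inheritance of those
# globals; the portable variant below sets them in a pool initializer instead.
# All names here (_shared_table, _segment_worker, ...) are illustrative.
import multiprocessing
import numpy as np

_shared_table = None  # filled in per worker by _init_worker

def _init_worker(table):
    global _shared_table
    _shared_table = table

def _segment_worker(segment):
    # score one slice of the parameter space against the shared table and
    # return the best (param, score) pair found in that slice
    best_param, best_score = None, float("-inf")
    for param in segment:
        score = -float(np.sum((np.asarray(param) - _shared_table) ** 2))
        if score > best_score:
            best_param, best_score = param, score
    return best_param, best_score

if __name__ == "__main__":
    table = np.array([0.5, 1.0])
    param_space = [(a, b) for a in np.arange(0, 2.01, 0.25)
                   for b in np.arange(0, 2.01, 0.25)]
    segments = [param_space[i:i + 16] for i in range(0, len(param_space), 16)]
    best_param, best_score = None, float("-inf")
    with multiprocessing.Pool(processes=4, initializer=_init_worker,
                              initargs=(table,)) as pool:
        for param, score in pool.imap_unordered(_segment_worker, segments):
            if score > best_score:
                best_param, best_score = param, score
    print(best_param, best_score)
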
if norowdim:
    source_depths = np.array([source_depths])
    receiver_depths = np.array([receiver_depths])
nocoldim = distances.ndim == 0
if nocoldim:
    distances = np.array([distances])
ttimes = np.full(shape=(source_depths.shape[0], distances.shape[0]), fill_value=np.nan)

def mp_callback(index, array, _callback=None):
    def _(result):
        array[index] = result
        if _callback is not None:
            _callback()
    return _

pool = Pool()
for idx, sdepth, rdepth in zip(count(), source_depths, receiver_depths):
    tmp_ttimes = ttimes[idx]
    for i, dist in enumerate(distances):
        pool.apply_async(min_traveltime, (model, sdepth, rdepth, dist, phases),
                         callback=mp_callback(i, tmp_ttimes, callback))
pool.close()
pool.join()
if norowdim or nocoldim:
    ttimes = ttimes.flatten()
return ttimes
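
# A minimal, self-contained sketch of the callback pattern above: each
# apply_async() result is written into a preallocated NumPy row by a small
# closure, so the parent never blocks on individual tasks. The names here
# (travel_time, depths, dists) are illustrative stand-ins.
import multiprocessing
import numpy as np

def travel_time(depth, dist):
    # stand-in for the real per-cell computation
    return depth + dist

def write_into(index, row):
    def _callback(result):
        row[index] = result
    return _callback

if __name__ == "__main__":
    depths = np.array([1.0, 2.0, 3.0])
    dists = np.array([10.0, 20.0])
    ttimes = np.full((depths.shape[0], dists.shape[0]), np.nan)
    pool = multiprocessing.Pool()
    for r, depth in enumerate(depths):
        row = ttimes[r]  # a view: callbacks fill it in place
        for c, dist in enumerate(dists):
            pool.apply_async(travel_time, (depth, dist),
                             callback=write_into(c, row))
    pool.close()
    pool.join()
    print(ttimes)
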
    bidule = BackupProxmox(cluster, None, status_queue, args)
else:
    bidule = BackupPlain(cluster, None, status_queue, args)
bidule.expire_live()
if args.cluster is None and args.profile is None and args.vmid is None:
    Log.debug('Expiring our snapshots')
    # Dummy Ceph object used to retrieve the real backup Object
    ceph = Ceph(None)
    with Status_updater(manager, 'images cleaned up on backup cluster') as status_queue:
        data = list()
        for i in ceph.backup.ls():
            data.append({'ceph': ceph, 'image': i, 'status_queue': status_queue})
            status_queue.put('add_item')
        with multiprocessing.Pool(config['backup_worker']) as pool:
            for i in pool.imap_unordered(Backup.expire_backup, data):
                pass
    manager.shutdown()
elif args.action == 'ls':
    restore = Restore(args.rbd, None)
    data = restore.ls()
    if args.rbd is None:
        pt = pretty.Pt(['Ident', 'Disk', 'UUID'])
        for i in data:
            row = [i['ident'], i['disk'], i['uuid']]
            pt.add_row(row)
    else:
        pt = pretty.Pt(['Creation date', 'UUID'])

# names the user has provided and scale the randoms to the correct amount.
# Object ids are also sorted in increasing id for later binary search.
open_hdf5_file = core_utils.file_checker_loader(hdf5_data_file_name)
hdf5_data_grp = open_hdf5_file['data']
# Prime our output array.
n_reference = len(hdf5_data_grp)
reference_unknown_array = np.empty(n_reference, dtype=np.float32)
key_array = hdf5_data_grp.keys()
pdf_maker_obj = PDFMaker(key_array, args)
# Initialize the workers.
loader_pool = Pool(1)
matcher_pool = Pool(np.min((args.n_processes - 1, 1)))
print("\tPre-loading reference data...")
loader_result = loader_pool.imap(
    _load_pair_data,
    [(args.input_pair_hdf5_file, scale_name,
      key_array[start_idx:start_idx + args.n_reference_load_size])
     for start_idx in xrange(0, len(key_array),
                             args.n_reference_load_size)])
print("\tPre-loading unknown data...")
id_array, rand_ratio, weight_array, ave_weight = \
    _compute_region_densities_and_weights(
        unknown_data, hdf5_data_grp, args)
# Close the hdf5 file
open_hdf5_file.close()
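
# A minimal, self-contained sketch of the single-worker prefetch pattern
# above: Pool.imap() with one process loads chunks lazily in the background,
# and the consumer pulls each chunk only when it is ready to use it.
# load_chunk is an illustrative stand-in for _load_pair_data.
import multiprocessing
import time

def load_chunk(chunk_ids):
    time.sleep(0.2)  # stand-in for slow HDF5 / disk reads
    return {i: i * i for i in chunk_ids}

if __name__ == "__main__":
    key_ids = list(range(12))
    chunk_size = 4
    chunks = [key_ids[i:i + chunk_size] for i in range(0, len(key_ids), chunk_size)]
    loader_pool = multiprocessing.Pool(1)
    # imap starts loading chunks ahead of the consumer, one worker at a time
    loaded = loader_pool.imap(load_chunk, chunks)
    for chunk in loaded:
        # ... do the expensive matching work with this chunk here ...
        print(sorted(chunk))
    loader_pool.close()
    loader_pool.join()
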