def compute(self, chunk_i):
if self.config['crash']:
raise SomeCrash("CRASH!!!!")
r = np.zeros(recs_per_chunk, self.dtype)
r['time'] = chunk_i + self.config['secret_time_offset']
r['length'] = r['dt'] = 1
r['channel'] = np.arange(len(r))
return r
class SomeCrash(Exception):
pass
@strax.takes_config(
strax.Option('base_area', default=0),
strax.Option('give_wrong_dtype', default=False),
strax.Option('bonus_area', default_by_run=[(0, 0), (1, 1)]))
class Peaks(strax.Plugin):
provides = 'peaks'
data_kind = 'peaks'
depends_on = ('records',)
dtype = strax.peak_dtype()
parallel = True
def compute(self, records):
if self.config['give_wrong_dtype']:
            return np.zeros(5, [('a', np.int64), ('b', np.float64)])
p = np.zeros(len(records), self.dtype)
p['time'] = records['time']
        p['length'] = p['dt'] = 1
        # Use the configured areas so base_area/bonus_area take effect
        p['area'] = self.config['base_area'] + self.config['bonus_area']
        return p
import pytest
import strax
import numpy as np
# TODO: these are small modifications of the test helpers in test_core.py
# Can we avoid duplication somehow?
n_chunks = 10
recs_per_chunk = 10
run_id = '0'
class SomeCrash(Exception):
pass
@strax.takes_config(
strax.Option('crash', default=False)
)
class Records(strax.ParallelSourcePlugin):
provides = 'records'
depends_on = tuple()
dtype = strax.record_dtype()
def compute(self, chunk_i):
if self.config['crash']:
raise SomeCrash("CRASH!!!!")
r = np.zeros(recs_per_chunk, self.dtype)
r['time'] = chunk_i
r['length'] = 1
r['dt'] = 1
r['channel'] = np.arange(len(r))
return r
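# Usage sketch (not part of the original test helpers; a minimal assumption of
# how they are driven): register the plugins with a strax context and load the
# synthetic data. st.register() and st.get_array() are standard strax context
# methods; exact context setup may differ between strax versions.
def example_usage():
    st = strax.Context(storage=[])
    st.register(Records)
    st.register(Peaks)
    records = st.get_array(run_id, 'records')
    assert len(records) == n_chunks * recs_per_chunk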
# Normalize
with np.errstate(divide='ignore', invalid='ignore'):
x /= x.sum(axis=1).reshape(-1, 1)
result = np.ones((len(peaks), 2), dtype=np.float32) * float('nan')
with self.graph.as_default():
result[peak_mask, :] = self.nn.predict(x)
# Convert from mm to cm... why why why
result /= 10
return dict(x=result[:, 0], y=result[:, 1])
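# Toy illustration (not from the original file) of the normalization idiom
# above: divide each row of x by its row sum; np.errstate silences the
# warnings so that all-zero rows simply become NaN instead of raising noise.
import numpy as np

x = np.array([[1.0, 3.0],
              [0.0, 0.0]])
with np.errstate(divide='ignore', invalid='ignore'):
    x /= x.sum(axis=1).reshape(-1, 1)
# x is now [[0.25, 0.75], [nan, nan]]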
@export
@strax.takes_config(
strax.Option('s1_max_width', default=150,
help="Maximum (IQR) width of S1s"),
strax.Option('s1_min_n_channels', default=3,
help="Minimum number of PMTs that must contribute to a S1"),
strax.Option('s2_min_area', default=10,
help="Minimum area (PE) for S2s"),
strax.Option('s2_min_width', default=200,
help="Minimum width for S2s"))
class PeakClassification(strax.Plugin):
__version__ = '0.0.1'
depends_on = ('peak_basics',)
dtype = [
        (('Classification of the peak', 'type'), np.int8)]
parallel = True
    def compute(self, peaks):
        # Reconstructed locals (assumed): n, t, a, window and fraction are
        # not defined in this snippet; window and fraction would normally
        # come from the plugin's config options.
        n = len(peaks)
        t = peaks['time']
        a = peaks['area']
        window = int(1e7)  # assumed: time window in ns
        fraction = 0.5     # assumed: area fraction threshold
        results = np.zeros(n, dtype=np.int32)
left_i = 0
right_i = 0
for i, peak in enumerate(peaks):
while t[left_i] + window < t[i] and left_i < n - 1:
left_i += 1
while t[right_i] - window < t[i] and right_i < n - 1:
right_i += 1
results[i] = np.sum(a[left_i:right_i + 1] > a[i] * fraction)
return results - 1
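# Standalone sketch (not from the original file) of the two-pointer window
# count used in compute above: for each peak, advance the left/right pointers
# to bracket the peaks within +/- window ns, count those with area above
# fraction * area[i], and subtract 1 so a peak does not count itself.
import numpy as np

def count_competing(t, a, window, fraction):
    n = len(t)
    results = np.zeros(n, dtype=np.int32)
    left_i = right_i = 0
    for i in range(n):
        while t[left_i] + window < t[i] and left_i < n - 1:
            left_i += 1
        while t[right_i] - window < t[i] and right_i < n - 1:
            right_i += 1
        results[i] = np.sum(a[left_i:right_i + 1] > a[i] * fraction)
    return results - 1

# With a window wider than the whole span, every peak competes with the
# other two:
t = np.array([0, 100, 200])
a = np.array([10.0, 10.0, 10.0])
print(count_competing(t, a, window=250, fraction=0.5))  # -> [2 2 2]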
@export
@strax.takes_config(
strax.Option('trigger_min_area', default=100,
help='Peaks must have more area (PE) than this to '
'cause events'),
strax.Option('trigger_max_competing', default=7,
help='Peaks must have FEWER nearby larger or slightly smaller'
' peaks to cause events'),
strax.Option('left_event_extension', default=int(1e6),
help='Extend events this many ns to the left from each '
'triggering peak'),
strax.Option('right_event_extension', default=int(1e6),
help='Extend events this many ns to the right from each '
'triggering peak'),
strax.Option('max_event_duration', default=int(1e7),
help='Events longer than this are forcefully ended, '
'triggers in the truncated part are lost!'),
)
offset = rec_i * samples_per_record
r['data'][:n_store] = p.raw_data[offset:offset + n_store]
output_record_index += 1
results.append(records)
if len(results) >= events_per_chunk:
yield finish_results()
mypax.shutdown()
if len(results):
yield finish_results()
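# Generic sketch (a simplified assumption, not the original code) of the
# accumulate-and-flush pattern above: buffer converted events and yield a
# chunk whenever the buffer reaches events_per_chunk, flushing whatever is
# left once the source is exhausted.
def chunked(source, events_per_chunk=10):
    buffer = []
    for event in source:
        buffer.append(event)
        if len(buffer) >= events_per_chunk:
            yield buffer
            buffer = []
    if len(buffer):
        yield buffer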
@export
@strax.takes_config(
strax.Option('pax_raw_dir', default='/data/xenon/raw', track=False,
help="Directory with raw pax datasets"),
strax.Option('stop_after_zips', default=0, track=False,
help="Convert only this many zip files. 0 = all."),
strax.Option('events_per_chunk', default=10, track=False,
help="Number of events to yield per chunk")
)
class RecordsFromPax(strax.Plugin):
provides = 'raw_records'
data_kind = 'raw_records'
depends_on = tuple()
dtype = strax.record_dtype()
parallel = False
def iter(self, *args, **kwargs):
        if not os.path.exists(self.config['pax_raw_dir']):
            raise FileNotFoundError(self.config['pax_raw_dir'])
import string
import typing as ty
import warnings
import numexpr
import numpy as np
import pandas as pd
import strax
export, __all__ = strax.exporter()
__all__ += ['RUN_DEFAULTS_KEY']
RUN_DEFAULTS_KEY = 'strax_defaults'
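# Note on the idiom above: strax.exporter() returns an `export` decorator and
# the module's __all__ list; decorating an object with @export appends its
# name to __all__, which is why the plugin classes in these files carry the
# @export decorator.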
@strax.takes_config(
strax.Option(name='storage_converter', default=False,
help='If True, save data that is loaded from one frontend '
'through all willing other storage frontends.'),
strax.Option(name='fuzzy_for', default=tuple(),
help='Tuple of plugin names for which no checks for version, '
'providing plugin, and config will be performed when '
'looking for data.'),
strax.Option(name='fuzzy_for_options', default=tuple(),
help='Tuple of config options for which no checks will be '
'performed when looking for data.'),
strax.Option(name='allow_incomplete', default=False,
help="Allow loading of incompletely written data, if the "
"storage systems support it"),
strax.Option(name='allow_rechunk', default=True,
help="Allow rechunking of data during writing."),
    strax.Option(name='allow_multiprocess', default=False,
                 help="Allow multiprocessing."),
)
r['n_channels'] = (p['area_per_channel'] > 0).sum(axis=1)
r['range_50p_area'] = p['width'][:, 5]
r['max_pmt'] = np.argmax(p['area_per_channel'], axis=1)
r['max_pmt_area'] = np.max(p['area_per_channel'], axis=1)
# TODO: get n_top_pmts from config...
area_top = (p['area_per_channel'][:, :127]
* to_pe[:127].reshape(1, -1)).sum(axis=1)
# Negative-area peaks get 0 AFT - TODO why not NaN?
m = p['area'] > 0
r['area_fraction_top'][m] = area_top[m]/p['area'][m]
return r
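# Toy illustration (not from the original file) of the area_fraction_top
# calculation above: gain-weight the top-array channels, then divide by the
# total area, leaving AFT at 0 for peaks with non-positive area.
import numpy as np

n_top = 2  # assumed number of top PMTs (127 in the code above)
area_per_channel = np.array([[1.0, 1.0, 2.0],   # half the area on top
                             [0.0, 0.0, 0.0]])  # empty peak: AFT stays 0
to_pe = np.ones(3)                              # unit gains for simplicity
area = area_per_channel.sum(axis=1)

aft = np.zeros(len(area))
area_top = (area_per_channel[:, :n_top]
            * to_pe[:n_top].reshape(1, -1)).sum(axis=1)
m = area > 0
aft[m] = area_top[m] / area[m]
# aft is now [0.5, 0.0]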
@export
@strax.takes_config(
strax.Option(
'nn_architecture',
help='Path to JSON of neural net architecture',
default_by_run=[
(0, pax_file('XENON1T_tensorflow_nn_pos_20171217_sr0.json')),
(first_sr1_run, pax_file('XENON1T_tensorflow_nn_pos_20171217_sr1.json'))]), # noqa
strax.Option(
'nn_weights',
help='Path to HDF5 of neural net weights',
default_by_run=[
(0, pax_file('XENON1T_tensorflow_nn_pos_weights_20171217_sr0.h5')),
(first_sr1_run, pax_file('XENON1T_tensorflow_nn_pos_weights_20171217_sr1.h5'))]), # noqa
strax.Option('min_reconstruction_area',
                 help='Skip reconstruction if area (PE) is less than this',
                 default=10))
import os
import shutil
import numpy as np
import numba
import strax
from .common import to_pe, pax_file, get_resource, get_elife
export, __all__ = strax.exporter()
first_sr1_run = 170118_1327
@export
@strax.takes_config(
strax.Option('safe_break_in_pulses', default=1000,
help="Time (ns) between pulse starts indicating a safe break "
"in the datastream -- peaks will not span this."),
strax.Option('input_dir', type=str, track=False,
help="Directory where readers put data"),
strax.Option('erase', default=False, track=False,
help="Delete reader data after processing"))
class DAQReader(strax.ParallelSourcePlugin):
provides = 'raw_records'
depends_on = tuple()
dtype = strax.record_dtype()
rechunk_on_save = False
def _path(self, chunk_i):
return self.config["input_dir"] + f'/{chunk_i:06d}'
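# The chunk directories are zero-padded to six digits, so e.g. _path(7)
# returns '<input_dir>/000007' and _path(42) returns '<input_dir>/000042'.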