            'Cannot import ivre.db.%s for %s',
            modulename,
            url.geturl(),
            exc_info=True,
        )
        return None
    for submod in modulename.split('.'):
        module = getattr(module, submod)
    result = getattr(module, classname)(url)
    result.globaldb = self
    return result
return None
db = MetaDB(
    url=config.DB if hasattr(config, "DB") else None,
    urls=dict([x[3:].lower(), getattr(config, x)]
              for x in dir(config) if x.startswith('DB_')))
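# Hedged illustration of the DB_* mapping above: every config attribute named
# DB_<PURPOSE> ends up as urls['<purpose>'], while config.DB (if present) is
# the default URL. The class and URLs below are made up for the example; they
# are not defaults shipped with IVRE.
class _ExampleConfig(object):
    DB = "mongodb://localhost/ivre"
    DB_PASSIVE = "postgresql://ivre@localhost/ivre"

example_urls = dict([x[3:].lower(), getattr(_ExampleConfig, x)]
                    for x in dir(_ExampleConfig) if x.startswith('DB_'))
assert example_urls == {'passive': 'postgresql://ivre@localhost/ivre'}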
        continue
    fname = fname[0]
    fname_sensor = fname.groupdict()['sensor']
    if fname_sensor in procs:
        proc = procs[fname_sensor]
    else:
        proc = create_process(progname, fname_sensor)
        procs[fname_sensor] = proc
    fname = fname.group()
    # Our "lock system": if we can move the file, it's ours
    try:
        shutil.move(os.path.join(directory, fname),
                    os.path.join(directory, "current"))
    except shutil.Error:
        continue
    if config.DEBUG:
        utils.LOGGER.debug("Handling %s", fname)
    fname = os.path.join(directory, "current", fname)
    fdesc = utils.open_file(fname)
    handled_ok = True
    for line in fdesc:
        try:
            proc.stdin.write(line)
        except ValueError:
            utils.LOGGER.warning("Error while handling line %r. "
                                 "Trying again", line)
            proc = create_process(progname, fname_sensor)
            procs[fname_sensor] = proc
            # Second (and last) try
            try:
                proc.stdin.write(line)
                utils.LOGGER.warning(" ... OK")
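# Minimal standalone sketch of the "move as lock" trick used above: several
# workers can watch the same spool directory, and whichever one manages to
# move a file into the "current" subdirectory owns it. Function and variable
# names here are illustrative, not part of IVRE.
import os
import shutil

def claim_file(spool_dir, fname):
    """Return the claimed path if we won the race for `fname`, else None."""
    current = os.path.join(spool_dir, "current")
    try:
        # Moving *into* "current" fails with shutil.Error if a file with the
        # same name is already there, and with OSError if another worker
        # already moved the source away.
        shutil.move(os.path.join(spool_dir, fname), current)
    except (shutil.Error, OSError):
        return None
    return os.path.join(current, fname)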
@classmethod
def _update_timeslots(cls, updatespec, insertspec, rec):
    """
    If configured, adds timeslots to `updatespec`.
    config.FLOW_TIME enables timeslots.
    If config.FLOW_TIME_FULL_RANGE is set, a flow is linked to every
    timeslot between its start_time and end_time.
    Otherwise, it is only linked to the timeslot corresponding to its
    start_time.
    """
    if config.FLOW_TIME:
        if config.FLOW_TIME_FULL_RANGE:
            generator = cls._get_timeslots(
                rec['start_time'],
                rec['end_time'],
            )
        else:
            generator = cls._get_timeslot(
                rec['start_time'],
                config.FLOW_TIME_PRECISION,
                config.FLOW_TIME_BASE
            )
        for tslot in generator:
            tslot = dict(tslot)
            tslot['start'] = utils.datetime2timestamp(tslot['start'])
            updatespec.append(add_to_set_op("times", tslot))
            lst = insertspec.setdefault("times", [])
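# Hedged sketch of the timeslot logic described in the docstring above: a
# flow's start_time is snapped onto a grid of config.FLOW_TIME_PRECISION
# seconds anchored at config.FLOW_TIME_BASE; with FLOW_TIME_FULL_RANGE, one
# slot is produced per step until end_time. These helpers are illustrative,
# not IVRE's actual _get_timeslot()/_get_timeslots() implementations, and
# work on Unix timestamps rather than datetime objects.
def example_timeslot(start_ts, precision, base):
    start = start_ts - ((start_ts - base) % precision)
    return {'start': start, 'duration': precision}

def example_timeslots(start_ts, end_ts, precision, base):
    current = start_ts - ((start_ts - base) % precision)
    while current <= end_ts:
        yield {'start': current, 'duration': precision}
        current += precision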
if 'start' in scan:
    scan['start'] = datetime.datetime.utcfromtimestamp(
        int(scan['start'])
    )
if 'scaninfos' in scan:
    scan["scaninfo"] = scan.pop('scaninfos')
scan["sha256"] = utils.decode_hex(scan.pop('_id'))
insrt = insert(self.tables.scanfile).values(
    **dict(
        (key, scan[key])
        for key in ['sha256', 'args', 'scaninfo', 'scanner', 'start',
                    'version', 'xmloutputversion']
        if key in scan
    )
)
if config.DEBUG:
    scanfileid = self.db.execute(
        insrt.returning(self.tables.scanfile.sha256)
    ).fetchone()[0]
    utils.LOGGER.debug("SCAN STORED: %r", utils.encode_hex(scanfileid))
else:
    self.db.execute(insrt)
if use_argparse:
    parser.add_argument('files', nargs='*', metavar='FILE',
                        help='Files to import in the flow database')
parser.add_argument("-v", "--verbose", help="verbose mode",
                    action="store_true")
parser.add_argument("-t", "--type", help="file type",
                    choices=list(PARSERS_CHOICE))
parser.add_argument("-f", "--pcap-filter",
                    help="pcap filter to apply (when supported)")
parser.add_argument("-C", "--no-cleanup",
                    help="avoid port cleanup heuristics",
                    action="store_true")
args = parser.parse_args()
if args.verbose:
    config.DEBUG = True
query_cache = {}
for fname in args.files:
    try:
        fileparser = PARSERS_CHOICE[args.type]
    except KeyError:
        with utils.open_file(fname) as fdesc:
            try:
                fileparser = PARSERS_MAGIC[fdesc.read(4)]
            except KeyError:
                utils.LOGGER.warning(
                    'Cannot find the appropriate parser for file %r',
                    fname,
                )
                continue
    bulk = db.flow.start_bulk_insert()
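# Hedged illustration of the fallback above: when --type is not given, the
# first four bytes of the file are looked up in PARSERS_MAGIC. The table
# below only shows the standard pcap magic numbers as an example; it is not
# IVRE's actual PARSERS_MAGIC mapping.
EXAMPLE_PARSERS_MAGIC = {
    b"\xd4\xc3\xb2\xa1": "pcap (little-endian)",
    b"\xa1\xb2\xc3\xd4": "pcap (big-endian)",
}

def example_guess_parser(path):
    with open(path, "rb") as fdesc:
        return EXAMPLE_PARSERS_MAGIC.get(fdesc.read(4))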
def main():
    # write headers
    sys.stdout.write(webutils.JS_HEADERS)
    sys.stdout.write("\r\n")
    sys.stdout.writelines(
        'config.%s = %s;\n' % (key, json.dumps(value))
        for key, value in viewitems({
            "notesbase": config.WEB_NOTES_BASE,
            "dflt_limit": config.WEB_LIMIT,
            "warn_dots_count": config.WEB_WARN_DOTS_COUNT,
            "publicsrv": config.WEB_PUBLIC_SRV,
            "uploadok": config.WEB_UPLOAD_OK,
            "flow_time_precision": config.FLOW_TIME_PRECISION,
            "version": VERSION,
        })
    )
def _find_get_notepad_pages():
    """This function finds and returns the get_notepad_pages() function
    to use, based on the configuration.
    """
    if config.WEB_GET_NOTEPAD_PAGES is None:
        return None
    if not isinstance(config.WEB_GET_NOTEPAD_PAGES, tuple):
        config.WEB_GET_NOTEPAD_PAGES = (config.WEB_GET_NOTEPAD_PAGES, ())
    return functools.partial(
        GET_NOTEPAD_PAGES[config.WEB_GET_NOTEPAD_PAGES[0]],
        *config.WEB_GET_NOTEPAD_PAGES[1]
    )
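# Hedged usage sketch for the helper above: config.WEB_GET_NOTEPAD_PAGES may
# be either a bare backend name or a (name, extra_args) tuple; both forms end
# up as a functools.partial over the matching GET_NOTEPAD_PAGES entry. The
# backend name below is only an example value, not necessarily a key that
# exists in GET_NOTEPAD_PAGES.
config.WEB_GET_NOTEPAD_PAGES = "localdokuwiki"
get_notepad_pages = _find_get_notepad_pages()
if get_notepad_pages is not None:
    pages = get_notepad_pages()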
    iterable of (timestamp, spec) values. This generic
    implementation does not use the bulk capacity of the
    underlying DB implementation but uses a local cache and calls
    its `.insert_or_update()` method.
    """
    def _bulk_execute(records):
        utils.LOGGER.debug("DB:local bulk upsert: %d", len(records))
        for spec, metadata in viewitems(records):
            self.insert_or_update(metadata.firstseen,
                                  dict(spec, **metadata.data),
                                  getinfos=getinfos,
                                  lastseen=metadata.lastseen)
    records = {}
    utils.LOGGER.debug("DB: creating a local bulk upsert (%d records)",
                       config.LOCAL_BATCH_SIZE)
    if separated_timestamps:
        for timestamp, spec in specs:
            if spec is None:
                continue
            infos = spec.pop('infos', None)
            spec = tuple((key, spec[key]) for key in sorted(spec))
            records.setdefault(spec, _RecInfo(infos)).update(timestamp)
            if len(records) >= config.LOCAL_BATCH_SIZE:
                _bulk_execute(records)
                records = {}
    else:
        for spec in specs:
            if spec is None:
                continue
            infos = spec.pop('infos', None)
            basespec = tuple(
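# Hedged usage sketch for the generic bulk upsert above: with
# separated_timestamps=True, `specs` is an iterable of (timestamp, spec)
# pairs; identical specs are merged in a local cache and flushed every
# config.LOCAL_BATCH_SIZE distinct records. The record below is illustrative,
# and db.passive refers to the MetaDB instance built earlier.
import time

specs = (
    (time.time(), {'addr': '198.51.100.1', 'recontype': 'DNS_ANSWER'})
    for _ in range(3)
)
db.passive.insert_or_update_bulk(specs, separated_timestamps=True)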
@classmethod
def date_round(cls, date):
    if isinstance(date, datetime):
        ts = utils.datetime2timestamp(date)
    else:
        ts = date
    ts = ts - (ts % config.FLOW_TIME_PRECISION)
    if isinstance(date, datetime):
        return datetime.fromtimestamp(ts)
    return ts
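# Worked example of the rounding above, assuming config.FLOW_TIME_PRECISION
# is 3600 (one hour): any timestamp within a given hour is mapped back to the
# start of that hour.
ts = 1609460000                          # 2021-01-01 00:13:20 UTC
assert ts - (ts % 3600) == 1609459200    # 2021-01-01 00:00:00 UTC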