# Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
# Index the elements of terms.rdf by their local name (the portion of
# rdf:about after the '#'): rdfs:Class elements whose name ends in 'Table'
# go into `tables`, rdf:Property elements into `columns`.
for elem in read_terms().iter():
    about = elem.attrib.get(ns('rdf:about'))
    if about is None:
        continue  # element carries no rdf:about attribute; skip it
    local_name = about.split('#')[-1]
    if elem.tag == ns('rdfs:Class') and local_name.endswith('Table'):
        tables[local_name] = elem
    elif elem.tag == ns('rdf:Property'):
        columns[local_name] = elem
# Build one CSVW table description per component and write it out as
# <Table>-metadata.json under components/<subdir>/.
comps = {}
for subdir, spec in COMPONENTS.items():
    component = make_table(tables.pop(spec['table']))
    schema_columns = component['tableSchema']['columns']
    for col_name, required in spec['columns']:
        schema_columns.append(make_column(columns[col_name], required))
    # Keep the table description around so the module metadata below
    # (elsewhere in this file) can reference it.
    comps[subdir] = component
    dump(
        component,
        REPO_DIR.joinpath(
            'components', subdir, '{0}-metadata.json'.format(spec['table'])),
        indent=4)
# Write one <Module>-metadata.json per module under modules/<subdir>/,
# referencing the component table descriptions assembled in `comps`.
for subdir, comprefs in MODULES.items():
    dump(
        OrderedDict([
            ("@context", ["http://www.w3.org/ns/csvw", {"@language": "en"}]),
            ("dc:conformsTo",
             "http://cldf.clld.org/v1.0/terms.rdf#{0}".format(subdir)),
            ("dialect", {
                "commentPrefix": None,
            }),
            ("tables", [comps[ref] for ref in comprefs]),
        ]),
        # FIX: the dump() call was truncated here (no output path, no indent,
        # unclosed parenthesis). Restored to match the identical loop that
        # appears intact later in this file.
        REPO_DIR.joinpath('modules', subdir, '{0}-metadata.json'.format(subdir)),
        indent=4)
# One-off cleanup of the `source` table via raw SQL on the SQLAlchemy session.
# NOTE(review): this fragment appears to be spliced in from a different
# script than the CSVW metadata code surrounding it.
# Backfill missing descriptions: prefer title, then booktitle.
DBSession.execute("update source set description = title where description is null and title is not null;")
DBSession.execute("update source set description = booktitle where description is null and booktitle is not null;")
# Recompute page info for sources whose parsed page count is negative.
# list(...) materializes the rows so the select cursor is exhausted before
# the updates below execute.
for row in list(DBSession.execute(
        "select pk, pages, pages_int, startpage_int from source where pages_int < 0")):
    pk, pages, number, start = row
    # compute_pages presumably returns (startpage, endpage, page_count)
    # -- TODO confirm against its definition; `start` is unpacked but unused.
    _start, _end, _number = compute_pages(pages)
    if _number > 0 and _number != number:
        # Values come from compute_pages (ints), so %-interpolation is not an
        # injection risk here, though bound parameters would be preferable.
        DBSession.execute(
            "update source set pages_int = %s, startpage_int = %s where pk = %s" %
            (_number, _start, pk))
        # NOTE(review): this update targets `ref` while the rows were selected
        # from `source` -- presumably they share primary keys; verify.
        DBSession.execute(
            "update ref set endpage_int = %s where pk = %s" %
            (_end, pk))
# Persist the accumulated change log next to the reference data.
jsondump(changes, args.data_dir.joinpath('references', 'changes.json'))
# NOTE(review): stray line from a splice -- `lname` and `e` are not bound in
# this scope; it repeats the tail of the term-indexing loop earlier in the file.
columns[lname] = e
# NOTE(review): the following is a duplicate of the COMPONENTS loop that
# appears earlier in this file; confirm whether one copy should be removed.
# Build one CSVW table description per component and write it to
# components/<subdir>/<Table>-metadata.json.
comps = {}
for subdir, spec in COMPONENTS.items():
    table = make_table(tables.pop(spec['table']))
    for c, req in spec['columns']:
        table['tableSchema']['columns'].append(make_column(columns[c], req))
    # Retained so the module metadata loop below can reference it.
    comps[subdir] = table
    dump(
        table,
        REPO_DIR.joinpath(
            'components', subdir, '{0}-metadata.json'.format(spec['table'])),
        indent=4)
# Emit one <Module>-metadata.json per module under modules/<subdir>/, each
# embedding the component table descriptions collected in `comps`.
for subdir, comprefs in MODULES.items():
    module_md = OrderedDict()
    module_md["@context"] = ["http://www.w3.org/ns/csvw", {"@language": "en"}]
    module_md["dc:conformsTo"] = (
        "http://cldf.clld.org/v1.0/terms.rdf#{0}".format(subdir))
    # Suppress comment-prefix handling when the CSV files are read back.
    module_md["dialect"] = {"commentPrefix": None}
    module_md["tables"] = [comps[ref] for ref in comprefs]
    dump(
        module_md,
        REPO_DIR.joinpath('modules', subdir, '{0}-metadata.json'.format(subdir)),
        indent=4)
# NOTE(review): this fragment references names (`sources`, `potential_meds`,
# `med`, `l`, `i`, `ldstatus`) bound outside the visible excerpt -- presumably
# an enclosing loop over languages; confirm before reformatting further.
# For each publication year seen, pick the best (lowest-sorting) source
# published in or before that year as a potential MED.
for year in set(s.year for s in sources if s.year):
    # let's see if something better was published!
    eligible = [s for s in sources if s.year and s.year <= year]
    if eligible:
        potential_meds.append(sorted(eligible)[0])
# we store the precomputed sources information as jsondata:
# MED first, then the deduplicated potential MEDs, newest year first.
ldstatus[l.id] = [
    med,
    [s.__json__() for s in
     sorted(set(potential_meds), key=lambda s: -s.year)]]
# Progress indicator: report every 1000 iterations.
if i and i % 1000 == 0:
    print(i)
DBSession.close()
# Persist the computed status so it can be served without hitting the DB.
dump(ldstatus, 'glottolog3/static/ldstatus.json', indent=4)