def query_entity_def(session, name: str) -> str:
    """ Return a single (best) definition of an entity """
    rl = _query_entity_definitions(session, name)
    return correct_spaces(rl[0]["answer"]) if rl else ""
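
# All of the snippets on this page pass their final strings through
# correct_spaces(), which normalizes the spacing that " ".join() introduces
# around punctuation. A minimal sketch of why that is needed, assuming the
# correct_spaces() exported by the Icelandic tokenizer package that this
# repository builds on:
from tokenizer import correct_spaces

raw = " ".join(["Hver", "er", "Björk", "?"])
print(raw)                  # -> "Hver er Björk ?"
print(correct_spaces(raw))  # -> "Hver er Björk?"
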
def append_names(rd: RegisterType, q, prop_func) -> None:
    """ Iterate over query results and add them to the result dictionary rd,
        assuming that the key is a person name """
    for p in q:
        s = correct_spaces(prop_func(p))
        ai = dict(
            domain=p.domain,
            uuid=p.id,
            heading=p.heading,
            timestamp=p.timestamp,
            ts=p.timestamp.isoformat()[0:16],
            url=p.url,
        )
        # Obtain the key within rd that should be updated with new
        # data. This may be an existing key, a new key or None if no
        # update is to be performed.
        s = name_key_to_update(rd, s)
        if s is not None:
            rd[s][p.id] = ai  # Add to a dict of UUIDs
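
# A stripped-down sketch of the accumulation pattern used by append_names()
# above: a register maps person names to a dict of article UUIDs, and a
# key-resolution step decides whether a mention extends an existing entry,
# starts a new one, or is ignored. The resolver below is a trivial stand-in
# for name_key_to_update(), not the repository's real logic.
from typing import Dict, Optional

SketchRegister = Dict[str, Dict[str, dict]]

def resolve_key(rd: SketchRegister, name: str) -> Optional[str]:
    # Stand-in: accept any non-empty name as its own key
    return name if name else None

def add_mention(rd: SketchRegister, name: str, uuid: str, info: dict) -> None:
    key = resolve_key(rd, name)
    if key is not None:
        rd.setdefault(key, {})[uuid] = info  # one dict of article UUIDs per name

register: SketchRegister = {}
add_mention(register, "Katrín Jakobsdóttir", "a1", {"heading": "Stjórnarmyndun"})
add_mention(register, "Katrín Jakobsdóttir", "a2", {"heading": "Þingsetning"})
print(len(register["Katrín Jakobsdóttir"]))  # -> 2
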
def we_dont_like(answer: str) -> bool:
    """ Return True if we don't like this title as an answer and
        would prefer another one """
    # Skip titles that simply say that somebody is the husband or
    # wife of somebody else
    return answer.startswith(_DONT_LIKE_TITLE)

rl = _query_person_titles(session, name)
len_rl = len(rl)
index = 0
while index < len_rl and we_dont_like(rl[index]["answer"]):
    index += 1
if index >= len_rl:
    # If we don't like any answer anyway, go back to the topmost one
    index = 0
if index >= len_rl:
    # Still out of range: the result list is empty
    return "", None
return correct_spaces(rl[index]["answer"]), rl[index]["sources"][0]["domain"]
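
# The selection loop above reads: take the first title that passes the
# we_dont_like() filter, and fall back to the top-ranked title if every one
# of them is disliked, returning nothing only when the list is empty.
# A self-contained sketch of that pattern; the predicate and the data are
# made up for illustration and are not the repository's own.
from typing import Callable, List, Optional

def pick_first_liked(answers: List[str], dont_like: Callable[[str], bool]) -> Optional[str]:
    index = 0
    # Walk the ranked list until an answer passes the filter
    while index < len(answers) and dont_like(answers[index]):
        index += 1
    if index >= len(answers):
        # Nothing passed the filter: fall back to the top-ranked answer
        index = 0
    return answers[index] if answers else None

titles = ["eiginmaður Jónu", "forstjóri fyrirtækisins"]
print(pick_first_liked(titles, lambda t: t.startswith("eiginmaður")))  # -> "forstjóri fyrirtækisins"
print(pick_first_liked(titles, lambda t: True))                        # -> "eiginmaður Jónu"
print(pick_first_liked([], lambda t: True))                            # -> None
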
    va.append(sep)
    a.append(sep)
    # We convert inflectable numbers to their text equivalents
    # since the speech engine can't be relied upon to get the
    # inflection of numbers right
    va.append(numbers_to_neutral(rn))
    a.append(rn)
    cnt += 1
tail = ["stoppar á", to_dative(stop.name)]
va.extend(tail)
a.extend(tail)
# Store a location coordinate and a bus stop name in the context
query.set_context({"location": stop.location, "bus_stop": stop.name})
voice_answer = correct_spaces(" ".join(va) + ".")
answer = correct_spaces(" ".join(a))
answer = answer[0].upper() + answer[1:]
response = dict(answer=answer)
return response, answer, voice_answer
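
# The snippet above builds two parallel answers: a displayed text answer that
# keeps the digits, and a voice answer where numbers are spelled out for the
# speech synthesizer. A minimal sketch of that pattern; number_to_text() is a
# stand-in for numbers_to_neutral() and does not reproduce Icelandic number
# inflection.
def number_to_text(n: str) -> str:
    return {"3": "þrír", "6": "sex", "14": "fjórtán"}.get(n, n)

routes = ["3", "6", "14"]
va, a = [], []  # voice answer parts, text answer parts
for cnt, rn in enumerate(routes):
    if cnt:
        va.append("og")
        a.append("og")
    va.append(number_to_text(rn))  # the speech engine gets the number as a word
    a.append(rn)                   # the displayed answer keeps the digits
print(" ".join(va))  # -> "þrír og sex og fjórtán"
print(" ".join(a))   # -> "3 og 6 og 14"
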
self._error = None  # Erase previous error, if any
self._qtype = None  # Erase previous query type, if any
self._key = None
self._toklist = None
q = self._query.strip()
if not q:
    self.set_error("E_EMPTY_QUERY")
    return False
toklist = tokenize(q, auto_uppercase=self._auto_uppercase and q.islower())
toklist = list(toklist)
# The following seems not to be needed and may complicate things
# toklist = list(recognize_entities(toklist, enclosing_session=self._session))
actual_q = correct_spaces(" ".join(t.txt for t in toklist if t.txt))
if actual_q:
    actual_q = actual_q[0].upper() + actual_q[1:]
    if not any(actual_q.endswith(s) for s in ("?", ".", "!")):
        actual_q += "?"
# Update the beautified query string, as the actual_q string
# probably has more correct capitalization
self.set_beautified_query(actual_q)
if Settings.DEBUG:
    # Log the query string as seen by the parser
    print("Query is: '{0}'".format(actual_q))
parse_result, trees = Query._parse(toklist)
if not trees:
    # No parse tree was found for the query: give up
    self.set_error("E_NO_PARSE_TREES")
    return False
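
# The middle of parse() above turns the raw query into a 'beautified' display
# string: token texts are re-joined, run through correct_spaces(), the first
# letter is upper-cased, and a '?' is appended when no sentence-ending
# punctuation is present. A self-contained sketch of just that step, with
# plain strings standing in for the tokenizer's token objects:
from typing import List

def beautify(token_texts: List[str]) -> str:
    q = " ".join(t for t in token_texts if t)
    if q:
        q = q[0].upper() + q[1:]
        if not any(q.endswith(s) for s in ("?", ".", "!")):
            q += "?"
    return q

print(beautify(["hver", "er", "forseti", "Íslands"]))  # -> "Hver er forseti Íslands?"
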
def is_better_title(new_title: str, old_title: str) -> bool:
    """ Return True if new_title is a better title than old_title """
    len_new = len(new_title)
    len_old = len(old_title)
    if len_old >= _MAX_TITLE_LENGTH:
        # The old title is too long: a shorter one is preferable
        return len_new < len_old
    if len_new >= _MAX_TITLE_LENGTH:
        # This one is too long: we don't want it
        return False
    # Otherwise, longer is better
    return len_new > len_old
with BIN_Db.get_db() as bindb:
    for p in q:
        # Insert the name into the list if it's not already there,
        # or if the new title is longer than the previous one
        if p.name not in toplist or is_better_title(
            p.title, toplist[p.name][0]
        ):
            toplist[p.name] = (
                correct_spaces(p.title),
                p.article_url,
                p.id,
                bindb.lookup_name_gender(p.name),
            )
            if len(toplist) >= limit:
                # We now have as many names as we initially wanted: terminate the loop
                break
with changedlocale() as strxfrm:
    # Convert the dictionary to a sorted list of dicts
    return sorted(
        [
            dict(name=name, title=tu[0], gender=tu[3], url=tu[1], uuid=tu[2])
            for name, tu in toplist.items()
        ],
        key=lambda x: strxfrm(x["name"]),
    )
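
# changedlocale() above is a helper in this codebase that temporarily
# switches the collation locale and yields a strxfrm key function, so that
# names sort in Icelandic alphabetical order. The standard-library equivalent
# of that key looks like this; it assumes the is_IS.UTF-8 locale is installed
# on the machine.
import locale

names = ["Örn", "Anna", "Ásta", "Bjarni"]
locale.setlocale(locale.LC_COLLATE, "is_IS.UTF-8")
print(sorted(names, key=locale.strxfrm))  # -> ['Anna', 'Ásta', 'Bjarni', 'Örn']
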