def _parse_atom(self):
    # ATOM: '(' RHS ')' | NAME | STRING
    if self.value == "(":
        self._gettoken()
        a, z = self._parse_rhs()
        self._expect(token.RPAR)
        return a, z
    elif self.type in (token.NAME, token.STRING):
        a = NFAState()
        z = NFAState()
        a.addarc(z, self.value)
        self._gettoken()
        return a, z
    else:
        self._raise_error("expected (...) or NAME or STRING, got %s/%s",
                          self.type, self.value)
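# The method above builds its NFA out of NFAState objects. Below is a minimal
# sketch of what that class needs to support, inferred only from how it is
# used in these snippets (the real pgen2 implementation may carry more state):
class NFAState(object):
    def __init__(self):
        # Each arc is a (label, next_state) pair; label is None for an
        # epsilon (unlabelled) transition.
        self.arcs = []

    def addarc(self, next, label=None):
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))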
def _gettoken(self):
    # Advance to the next significant token, skipping comments and
    # non-logical newlines.
    tup = next(self.generator)
    while tup[0] in (token.COMMENT, token.NL):
        tup = next(self.generator)
    self.type, self.value, self.begin, prefix = tup
def _make_label(self, c, label):
    # Map a grammar label (symbol name, named token, keyword or operator
    # string) to an index into c.labels, creating the cache entry on first
    # use.  The alphabetic branch below is assumed to follow the
    # conventional pgen2 structure.
    ilabel = len(c.labels)
    if label[0].isalpha():
        # Either a symbol name or a named token
        if label in c.symbol2number:
            # A symbol name (a non-terminal)
            if label in c.symbol2label:
                return c.symbol2label[label]
            else:
                c.labels.append((c.symbol2number[label], None))
                c.symbol2label[label] = ilabel
                return ilabel
        else:
            # A named token (e.g. NAME, NUMBER, STRING)
            itoken = getattr(self._token_namespace, label, None)
            assert isinstance(itoken, int), label
            if itoken in c.tokens:
                return c.tokens[itoken]
            else:
                c.labels.append((itoken, None))
                c.tokens[itoken] = ilabel
                return ilabel
    else:
        # Either a keyword or an operator
        assert label[0] in ('"', "'"), label
        value = eval(label)
        if value[0].isalpha():
            # A keyword
            if value in c.keywords:
                return c.keywords[value]
            else:
                # TODO this might be an issue?! Using token.NAME here?
                c.labels.append((token.NAME, value))
                c.keywords[value] = ilabel
                return ilabel
        else:
            # An operator (any non-numeric token)
            itoken = self._token_namespace.generate_token_id(value)
            if itoken in c.tokens:
                return c.tokens[itoken]
            else:
                c.labels.append((itoken, None))
                c.tokens[itoken] = ilabel
                return ilabel
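# _make_label reads and writes a handful of caches on the grammar object ``c``.
# A minimal sketch of that container, going only by the attributes touched
# above (the real pgen2 grammar class carries more fields, e.g. the DFAs and
# states themselves):
class PgenGrammar(object):
    def __init__(self):
        self.labels = []         # list of (token id or symbol number, keyword or None)
        self.tokens = {}         # token id -> label index
        self.keywords = {}       # keyword string -> label index
        self.symbol2number = {}  # non-terminal name -> symbol number
        self.symbol2label = {}   # non-terminal name -> label index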
def _parse_alt(self):
    # ALT: ITEM+
    a, b = self._parse_item()
    while (self.value in ("(", "[") or
           self.type in (token.NAME, token.STRING)):
        c, d = self._parse_item()
        b.addarc(c)
        b = d
    return a, b
def _expect(self, type):
    if self.type != type:
        self._raise_error("expected %s(%s), got %s(%s)",
                          type, token.tok_name[type], self.type, self.value)
    value = self.value
    self._gettoken()
    return value
def _parse(self):
    dfas = {}
    startsymbol = None
    # MSTART: (NEWLINE | RULE)* ENDMARKER
    while self.type != token.ENDMARKER:
        while self.type == token.NEWLINE:
            self._gettoken()
        # RULE: NAME ':' RHS NEWLINE
        name = self._expect(token.NAME)
        self._expect(token.COLON)
        a, z = self._parse_rhs()
        self._expect(token.NEWLINE)
        #self._dump_nfa(name, a, z)
        dfa = self._make_dfa(a, z)
        #self._dump_dfa(name, dfa)
        # oldlen = len(dfa)
        self._simplify_dfa(dfa)
        # newlen = len(dfa)
        dfas[name] = dfa
        #print name, oldlen, newlen
        if startsymbol is None:
            startsymbol = name
    return dfas, startsymbol
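# _parse consumes rules of the shape ``NAME ':' RHS NEWLINE`` and produces one
# DFA per rule, with the first rule acting as the start symbol. A hypothetical
# two-rule grammar of the kind this loop handles (illustrative only, not from
# the original source):
example_bnf = """
file_input: (NEWLINE | stmt)* ENDMARKER
stmt: NAME '=' NAME NEWLINE
"""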