cpython/Lib/lib2to3/pgen2/pgen.py

# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# Pgen imports
from . import grammar, token, tokenize

class PgenGrammar(grammar.Grammar):
    pass

class ParserGenerator(object):

    def __init__(self, filename, stream=None):
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken() # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {} # map from symbol name to set of tokens
        self.addfirstsets()

    def make_grammar(self):
        c = PgenGrammar()
        names = list(self.dfas.keys())
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in state.arcs.items():
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c

    def make_first(self, c, name):
        rawfirst = self.first[name]
        first = {}
        for label in rawfirst:
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

    def make_label(self, c, label):
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, int), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                # A keyword
                if value in c.keywords:
                    return c.keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    c.keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value] # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel

    def addfirstsets(self):
        names = list(self.dfas.keys())
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            #print name, self.first[name].keys()

    def calcfirst(self, name):
        dfa = self.dfas[name]
        self.first[name] = None # dummy to detect left recursion
        state = dfa[0]
        totalset = {}
        overlapcheck = {}
        for label, next in state.arcs.items():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        inverse = {}
        for label, itsfirst in overlapcheck.items():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError("rule %s is ambiguous; %s is in the"
                                     " first sets of %s as well as %s" %
                                     (name, symbol, label, inverse[symbol]))
                inverse[symbol] = label
        self.first[name] = totalset

    def parse(self):
        dfas = {}
        startsymbol = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            #self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            #self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            #print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        return dfas, startsymbol

    def make_dfa(self, start, finish):
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA.  Then do some
        # state reduction.  Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)
        def closure(state):
            base = {}
            addclosure(state, base)
            return base
        def addclosure(state, base):
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)
        states = [DFAState(closure(start), finish)]
        for state in states: # NB states grows while we're iterating
            arcs = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in arcs.items():
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states # List of DFAState instances; first one is start
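    # Illustrative aside (an addition, not part of the original module): for a
    # toy rule such as  atom: '(' rhs ')' | NAME  , parse_rhs() builds an NFA
    # whose start state reaches both alternatives through epsilon (None) arcs;
    # make_dfa() then folds each epsilon-closure into a single DFAState, so
    # the resulting start state carries exactly one labeled arc per distinct
    # lookahead ("'('" and NAME) -- the classic subset construction.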
    def dump_nfa(self, name, start, finish):
        print("Dump of NFA for", name)
        todo = [start]
        for i, state in enumerate(todo):
            print("  State", i, state is finish and "(final)" or "")
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print("    -> %d" % j)
                else:
                    print("    %s -> %d" % (label, j))

    def dump_dfa(self, name, dfa):
        print("Dump of DFA for", name)
        for i, state in enumerate(dfa):
            print("  State", i, state.isfinal and "(final)" or "")
            for label, next in state.arcs.items():
                print("    %s -> %d" % (label, dfa.index(next)))

    def simplify_dfa(self, dfa):
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.

        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i+1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        #print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break
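    # Illustrative aside (an addition, not part of the original module): the
    # unification above leans on DFAState.__eq__ below -- two states are
    # merged only when they agree on finality and have identical arcs, and
    # each merge redirects incoming arcs via unifystate(), so repeated passes
    # of the while loop shrink the DFA until no duplicate states remain.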
    def parse_rhs(self):
        # RHS: ALT ('|' ALT)*
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self):
        # ALT: ITEM+
        a, b = self.parse_item()
        while (self.value in ("(", "[") or
               self.type in (token.NAME, token.STRING)):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

    def parse_item(self):
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a

    def parse_atom(self):
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
                             self.type, self.value)

    def expect(self, type, value=None):
        if self.type != type or (value is not None and self.value != value):
            self.raise_error("expected %s/%s, got %s/%s",
                             type, value, self.type, self.value)
        value = self.value
        self.gettoken()
        return value

    def gettoken(self):
        tup = next(self.generator)
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = next(self.generator)
        self.type, self.value, self.begin, self.end, self.line = tup
        #print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg, *args):
        if args:
            try:
                msg = msg % args
            except:
                msg = " ".join([msg] + list(map(str, args)))
        raise SyntaxError(msg, (self.filename, self.end[0],
                                self.end[1], self.line))

class NFAState(object):

    def __init__(self):
        self.arcs = [] # list of (label, NFAState) pairs

    def addarc(self, next, label=None):
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))

class DFAState(object):

    def __init__(self, nfaset, final):
        assert isinstance(nfaset, dict)
        assert isinstance(next(iter(nfaset)), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {} # map from label to DFAState

    def addarc(self, next, label):
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old, new):
        for label, next in self.arcs.items():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other):
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.items():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__ = None # For Py3 compatibility.

def generate_grammar(filename="Grammar.txt"):
    p = ParserGenerator(filename)
    return p.make_grammar()
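
# Usage sketch (an illustrative addition, not part of the original module):
# roughly how lib2to3.pgen2.driver regenerates a pickled grammar from a
# Grammar.txt file.  Grammar.dump() is inherited from grammar.Grammar and
# pickles the grammar tables to disk.
if __name__ == "__main__":
    g = generate_grammar("Grammar.txt")    # expects a Grammar.txt in the cwd
    g.dump("Grammar.pickle")               # write the pickled tables
    print(len(g.dfas), "rules,", len(g.labels), "labels")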