Revision: 17556
Author:   [email protected]
Date:     Thu Nov  7 12:03:35 2013 UTC
Log:      Experimental parser: some cleanup and assertions for rule_parser

[email protected]

BUG=

Review URL: https://codereview.chromium.org/64213002
http://code.google.com/p/v8/source/detail?r=17556

Modified:
 /branches/experimental/parser/tools/lexer_generator/action_test.py
 /branches/experimental/parser/tools/lexer_generator/generator.py
 /branches/experimental/parser/tools/lexer_generator/regex_parser.py
 /branches/experimental/parser/tools/lexer_generator/rule_parser.py
 /branches/experimental/parser/tools/lexer_generator/rule_parser_test.py

=======================================
--- /branches/experimental/parser/tools/lexer_generator/action_test.py Thu Nov 7 09:17:36 2013 UTC +++ /branches/experimental/parser/tools/lexer_generator/action_test.py Thu Nov 7 12:03:35 2013 UTC
@@ -39,8 +39,8 @@
   builder = NfaBuilder()
   for k, v in parser_state.rules.items():
     graphs = []
-    for (graph, precedence, code, condition) in v['regex']:
-      graphs.append(NfaBuilder.add_action(graph, (precedence, code, condition)))
+    for (graph, action) in v['regex']:
+      graphs.append(NfaBuilder.add_action(graph, action))
     nfa = builder.nfa(NfaBuilder.or_graphs(graphs))
     dfa = dfa_from_nfa(nfa)
     rule_map[k] = (nfa, dfa)
=======================================
--- /branches/experimental/parser/tools/lexer_generator/generator.py Thu Nov 7 10:20:45 2013 UTC +++ /branches/experimental/parser/tools/lexer_generator/generator.py Thu Nov 7 12:03:35 2013 UTC
@@ -79,13 +79,16 @@
   rule_map = {}
   builder = NfaBuilder()
   builder.set_character_classes(parser_state.character_classes)
+  assert 'default' in parser_state.rules
   for k, v in parser_state.rules.items():
+    assert 'default' in v
     graphs = []
-    for (graph, precedence, code, action) in v['regex']:
-      graphs.append(NfaBuilder.add_action(graph, (precedence, code, action)))
-    rule_map[k] = builder.nfa(NfaBuilder.or_graphs(graphs))
+    for (graph, action) in v['regex']:
+      graphs.append(NfaBuilder.add_action(graph, action))
+    rule_map[k] = NfaBuilder.or_graphs(graphs)
   html_data = []
-  for rule_name, nfa in rule_map.items():
+  for rule_name, graph in rule_map.items():
+    nfa = builder.nfa(graph)
     (start, dfa_nodes) = nfa.compute_dfa()
     dfa = Dfa(start, dfa_nodes)
     html_data.append((rule_name, nfa, dfa))
=======================================
--- /branches/experimental/parser/tools/lexer_generator/regex_parser.py Thu Nov 7 08:57:51 2013 UTC +++ /branches/experimental/parser/tools/lexer_generator/regex_parser.py Thu Nov 7 12:03:35 2013 UTC
@@ -158,6 +158,6 @@
       RegexParser.__static_instance = parser
     try:
       return parser.parser.parse(data, lexer=parser.lexer.lexer)
-    except Exception as e:
+    except Exception:
       RegexParser.__static_instance = None
-      raise e
+      raise
=======================================
--- /branches/experimental/parser/tools/lexer_generator/rule_parser.py Thu Nov 7 08:57:51 2013 UTC +++ /branches/experimental/parser/tools/lexer_generator/rule_parser.py Thu Nov 7 12:03:35 2013 UTC
@@ -40,6 +40,7 @@
     self.character_classes = {}
     self.current_state = None
     self.rules = {}
+    self.transitions = set()

   def parse(self, string):
     return RuleParser.parse(string, self)
@@ -48,6 +49,8 @@

   tokens = RuleLexer.tokens
   __rule_precedence_counter = 0
+  __keyword_transitions = set([
+      'continue', 'break', 'terminate', 'terminate_illegal'])

   def __init__(self):
     self.__state = None
@@ -95,9 +98,15 @@
     '''transition_rule : composite_regex_or_default code action
                        | composite_regex_or_default empty action
                        | composite_regex_or_default code empty'''
-    rules = self.__state.rules[self.__state.current_state]
-    rule = (p[1], RuleParser.__rule_precedence_counter, p[2], p[3])
+    transition = p[3] if p[3] else 'continue'
+    if transition == 'continue' and self.__state.current_state == 'default':
+      transition = 'break'
+    if not transition in self.__keyword_transitions:
+      assert not transition == 'default'
+      self.__state.transitions.add(transition)
+    rule = (p[1], (RuleParser.__rule_precedence_counter, p[2], transition))
     RuleParser.__rule_precedence_counter += 1
+    rules = self.__state.rules[self.__state.current_state]
     if p[1] == 'default':
       assert not rules['default']
       rules['default'] = rule
@@ -197,7 +206,8 @@
     parser.__state = parser_state
     try:
       parser.parser.parse(data, lexer=parser.lexer.lexer)
-    except Exception as e:
+    except Exception:
       RuleParser.__static_instance = None
-      raise e
+      raise
+    assert parser_state.transitions <= set(parser_state.rules.keys())
     parser.__state = None
=======================================
--- /branches/experimental/parser/tools/lexer_generator/rule_parser_test.py Thu Nov 7 08:57:51 2013 UTC +++ /branches/experimental/parser/tools/lexer_generator/rule_parser_test.py Thu Nov 7 12:03:35 2013 UTC
@@ -44,8 +44,8 @@
 <cond1> alias <<cond2>>
 <cond2> /regex/ {body}
 <cond2> alias {body}
-<cond3> /regex/ {body} <<cond4>>
-<cond3> alias {body} <<cond4>>''')
+<cond3> /regex/ {body} <<cond1>>
+<cond3> alias {body} <<cond1>>''')

      self.assertTrue(len(self.state.aliases), 1)
      self.assertTrue('alias' in self.state.aliases)

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.

Reply via email to