summaryrefslogtreecommitdiff
path: root/prolog/parser.py
diff options
context:
space:
mode:
authorTimotej Lazar <timotej.lazar@araneo.org>2015-04-01 16:19:02 +0200
committerAleš Smodiš <aless@guru.si>2015-08-11 14:26:03 +0200
commitb203887ecb557a02f4df7c47c2f46ee5dedb140e (patch)
tree75bf01760df8e3c217d1124c13abeb646433fb9c /prolog/parser.py
parent4cffac21da85476e3656b79a8da3a019429740f1 (diff)
Use Node class from monkey.util in parser
Also simplify rules for list expressions.
Diffstat (limited to 'prolog/parser.py')
-rw-r--r--prolog/parser.py68
1 file changed, 25 insertions, 43 deletions
diff --git a/prolog/parser.py b/prolog/parser.py
index ec42b5b..456539f 100644
--- a/prolog/parser.py
+++ b/prolog/parser.py
@@ -3,6 +3,7 @@
import ply.yacc as yacc
from .lexer import operators, tokens
from .util import Token
+from monkey.graph import Node
# PARSER
precedence = (
@@ -20,22 +21,9 @@ precedence = (
('nonassoc', 'LBRACKET', 'RBRACKET', 'LPAREN', 'RPAREN', 'COMMA', 'SEMI', 'PIPE', 'LBRACE', 'RBRACE')
)
-class Node:
- def __init__(self, type, children=None, data=None):
- self.type = type
- self.data = data
- self.children = children if children else []
-
- def __str__(self):
- val = self.type
- if self.children:
- val += ' ' + ' '.join([str(c) for c in self.children])
- val = '({})'.format(val)
- return val
-
def make_token(p, n):
lextoken = p.slice[n]
- return Token(lextoken.type, lextoken.value, lextoken.lexpos)
+ return Node(data=Token(lextoken.type, lextoken.value, lextoken.lexpos))
def p_text_empty(p):
'text : '
@@ -43,7 +31,7 @@ def p_text_empty(p):
def p_text_clause(p):
'text : text clause'
p[0] = p[1]
- p[0].children.append(p[2])
+ p[0].eout.append(p[2])
def p_clause_head(p):
'clause : head PERIOD'
@@ -53,7 +41,7 @@ def p_clause_rule(p):
p[0] = Node('clause', [p[1], make_token(p, 2), p[3], make_token(p, 4)])
def p_clause_error(p):
'clause : error PERIOD'
- p[0] = Node('error')
+ p[0] = Node('clause', [Node('error'), make_token(p, 2)])
def p_head(p):
'head : term'
@@ -64,12 +52,12 @@ def p_or_single(p):
p[0] = p[1]
def p_or_if(p):
'or : or SEMI if'
- if p[1].type == 'or':
+ if p[1].data == 'or':
p[0] = p[1]
else:
p[0] = Node('or', [p[1]])
- p[0].children.append(make_token(p, 2))
- p[0].children.append(p[3])
+ p[0].eout.append(make_token(p, 2))
+ p[0].eout.append(p[3])
def p_if_single(p):
'if : and'
@@ -83,18 +71,18 @@ def p_and_single(p):
p[0] = p[1]
def p_and_term(p):
'and : and COMMA term'
- if p[1].type == 'and':
+ if p[1].data == 'and':
p[0] = p[1]
else:
p[0] = Node('and', [p[1]])
- p[0].children.append(make_token(p, 2))
- p[0].children.append(p[3])
+ p[0].eout.append(make_token(p, 2))
+ p[0].eout.append(p[3])
def p_term_functor(p):
'term : functor LPAREN args RPAREN'
# No whitespace allowed between functor and LPAREN.
t2 = make_token(p, 2)
- if p[1].children[0].pos + len(p[1].children[0].val) < t2.pos:
+ if p[1].eout[0].data.pos + len(p[1].eout[0].data.val) < t2.data.pos:
raise SyntaxError('whitespace before ' + str(t2))
p[0] = Node('term', [p[1], t2, p[3], make_token(p, 4)])
def p_term_or(p):
@@ -152,31 +140,25 @@ def p_args_single(p):
def p_args_term(p):
'args : args COMMA term'
p[0] = p[1]
- p[0].children.append(make_token(p, 2))
- p[0].children.append(p[3])
-
-def p_list_empty(p):
- 'list : LBRACKET RBRACKET'
- p[0] = Node('list', [make_token(p, 1), make_token(p, 2)])
-def p_list_term(p):
- 'list : LBRACKET listexpr RBRACKET'
- p[0] = Node('list', [make_token(p, 1), p[2], make_token(p, 3)])
-def p_listexpr_single(p):
- 'listexpr : term'
- p[0] = Node('listexpr', [p[1]])
-def p_listexpr_term(p):
- 'listexpr : term COMMA listexpr'
- p[0] = Node('listexpr', [p[1], make_token(p, 2), p[3]])
-def p_listexpr_pipe(p):
- 'listexpr : term PIPE term'
- p[0] = Node('listexpr', [p[1], make_token(p, 2), p[3]])
+ p[0].eout.append(make_token(p, 2))
+ p[0].eout.append(p[3])
+
+def p_list(p):
+ 'list : LBRACKET args RBRACKET'
+ p[0] = Node('list', [make_token(p, 1)] + p[2].eout + [make_token(p, 3)])
+def p_list_tail(p):
+ 'list : LBRACKET args PIPE term RBRACKET'
+ p[0] = Node('list', [make_token(p, 1)] + p[2].eout + [make_token(p, 3), p[4], make_token(p, 5)])
def p_functor(p):
'functor : NAME'
p[0] = Node('functor', [make_token(p, 1)])
-def p_error(p):
- print('Syntax error in input: ' + str(p))
+def p_error(t):
+ if t is None:
+ print('unexpected end of file')
+ else:
+ print('{}: unexpected {}'.format(t.lexpos, t.value))
parser = yacc.yacc()