# CodeQ: an online programming tutor.
# Copyright (C) 2015 UL FRI
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
# Stores a token's type and value, and optionally the position of the first
# character in the lexed stream.
class Token(namedtuple('Token', ['type', 'val', 'pos', 'rule', 'part', 'stop'])):
__slots__ = ()
# Custom constructor to support default parameters.
def __new__(cls, type, val='', pos=None, rule=None, part=None, stop=False):
return super(Token, cls).__new__(cls, type, val, pos, rule, part, stop)
def __str__(self):
return self.val
# Only consider type and value when comparing tokens. There is probably a
# cleaner way of doing this.
__eq__ = lambda x, y: x[0] == y[0] and x[1] == y[1]
__ne__ = lambda x, y: x[0] != y[0] or x[1] != y[1]
__lt__ = lambda x, y: tuple.__lt__(x[0:2], y[0:2])
__le__ = lambda x, y: tuple.__le__(x[0:2], y[0:2])
__ge__ = lambda x, y: tuple.__ge__(x[0:2], y[0:2])
__gt__ = lambda x, y: tuple.__gt__(x[0:2], y[0:2])
    # Only hash the token's value (we don't care about position, and types
    # are determined by values).
def __hash__(self):
return hash(self[1])
# Return a copy of this token, possibly modifying some fields.
def clone(self, type=None, val=None, pos=None, rule=None, part=None, stop=None):
return Token(self.type if type is None else type,
self.val if val is None else val,
self.pos if pos is None else pos,
self.rule if rule is None else rule,
self.part if part is None else part,
self.stop if stop is None else stop)
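# A small illustration of these semantics (hypothetical token values): two
# tokens are equal whenever type and value match, regardless of position,
#   Token('VARIABLE', 'X', pos=3) == Token('VARIABLE', 'X', pos=7)   # True
# and clone() copies every field that is not explicitly overridden,
#   Token('VARIABLE', 'X', pos=3).clone(val='Y')   # Token('VARIABLE', 'Y', pos=3)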
from .lexer import lexer, operators
from .parser import parser
def parse(code):
try:
return parser.parse(code)
except SyntaxError:
return None
# Return a list of tokens in [text].
def tokenize(text):
lexer.input(text)
return [Token(t.type, t.value, t.lexpos) for t in lexer]
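# For instance, tokenize('a(X) :- b(X).') produces tokens such as
# Token('NAME', 'a', 0) and Token('VARIABLE', 'X', 2); the exact set of
# token types is defined by lexer.py.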
# Return a one-line string representation of [tokens].
def stringify(tokens):
def token_str(t):
if t.type in ('PERIOD', 'COMMA'):
return str(t) + ' '
if t.type in operators.values():
return ' ' + str(t) + ' '
return str(t)
return ''.join(map(token_str, tokens))
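# Example (assuming ':-' is among the operator token types in lexer.py):
#   stringify(tokenize('a:-b,c'))   →   'a :- b, c'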
# Lex [code] into tokens annotated with rule and part indexes and stop
# markers.
def annotate(code):
rule = 0
part = 0
parens = [] # stack of currently open parens/brackets/braces
in_parens = 0 # COMMA means a new part if this is 0
token = None
lexer.input(code)
for t in lexer:
tok_rule = rule
tok_part = part
tok_stop = True
if t.type == 'PERIOD': # .
rule += 1
part = 0
in_parens = 0
parens = []
elif t.type in ('FROM', 'SEMI'): # :- ;
part += 1
elif t.type == 'COMMA': # ,
if not parens or in_parens == 0:
part += 1
else:
tok_stop = False
# Handle left parens.
elif t.type == 'LPAREN': # (
if token and token.type == 'NAME': # name(
tok_stop = False
parens.append('COMPOUND')
in_parens += 1
else:
parens.append(t.type) # …, (
elif t.type == 'LBRACKET': # [
tok_stop = False
parens.append(t.type)
in_parens += 1
elif t.type == 'LBRACE': # {
parens.append(t.type)
# Handle right parens.
elif t.type == 'RPAREN': # )
if parens:
if parens[-1] == 'COMPOUND': # name(…)
tok_stop = False
parens.pop()
in_parens -= 1
elif parens[-1] == 'LPAREN': # (…)
parens.pop()
elif t.type == 'RBRACKET': # ]
if parens and parens[-1] == 'LBRACKET': # […]
tok_stop = False
parens.pop()
in_parens -= 1
elif t.type == 'RBRACE': # }
if parens and parens[-1] == 'LBRACE': # {…}
parens.pop()
# Normal tokens.
else:
tok_stop = False
token = Token(t.type, t.value, t.lexpos, tok_rule, tok_part, tok_stop)
yield token
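# As an example, annotating 'a :- b, c.' tags every token with rule 0; 'a'
# and ':-' fall in part 0, 'b' and the comma in part 1, and 'c' and the
# period in part 2. Only the structural tokens (':-', top-level commas and
# the period) are marked stop=True.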
# Format a list of annotated [tokens] back into a code string, with each part
# of a rule on its own indented line.
def compose(tokens):
code = ''
prev = None
for t in tokens:
        if t.type == 'SEMI':
            code += '\n    '
        if prev and (prev.part != t.part or prev.rule != t.rule):
            code += '\n'
            if t.part:
                code += '    '
if t.type in ('PERIOD', 'COMMA'):
code += t.val + ' '
elif t.type in operators.values():
code += ' ' + t.val + ' '
else:
code += t.val
prev = t
return code.strip()
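# Continuing the example above, compose(annotate('a :- b, c.')) renders each
# part on its own indented line, roughly:
#   a :-
#       b,
#       c.
# (modulo trailing spaces before the line breaks).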
# Rename variables in [tokens] to A0, A1, A2,… in order of appearance.
def rename_vars(tokens, names=None):
if names is None:
names = {}
next_id = len(names)
    # Work on a copy; the input list is not modified.
tokens = list(tokens)
for i, t in enumerate(tokens):
if t.type == 'PERIOD':
names.clear()
next_id = 0
elif t.type == 'VARIABLE':
if t.val.startswith('_'):
tokens[i] = t.clone(val='A{}'.format(next_id))
next_id += 1
else:
cur_name = t.val
if cur_name not in names:
names[cur_name] = 'A{}'.format(next_id)
next_id += 1
tokens[i] = t.clone(val=names[cur_name])
return tokens
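# For example, rename_vars(tokenize('a(X, Y, X)')) renames X to A0 and Y to
# A1, yielding the values a ( A0 , A1 , A0 ). Variables whose names start
# with _ are treated as anonymous: every occurrence gets a fresh name.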
# Helper function that removes trailing punctuation from a line of tokens and
# renames variables to A0, A1, A2, … (optionally continuing from [var_names]).
# Return the result as a tuple.
def normalized(line, var_names=None):
# Remove trailing punctuation.
i = len(line)
while i > 0:
if line[i-1].type not in ('COMMA', 'PERIOD', 'SEMI'):
break
i -= 1
return tuple(rename_vars(line[:i], var_names))
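# E.g. normalized(tokenize('foo(X, Y),')) drops the trailing comma and
# returns the tokens of foo(A0, A1) as a tuple.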
# Map "formal" variable names in the edit a→b to actual names in code [tokens].
# The set [variables] contains all variable names in the current scope. These
# are used in cases such as [A]→[A,B], where the edit introduces new variables.
# Return a new version of b with actual variable names.
def map_vars(a, b, tokens, variables):
mapping = {}
new_index = 0
for i in range(len(a)):
if tokens[i].type == 'VARIABLE':
formal_name = a[i].val
if tokens[i].val != '_':
actual_name = tokens[i].val
else:
actual_name = 'New'+str(new_index)
new_index += 1
mapping[formal_name] = actual_name
    remaining_formal = [t.val for t in b if t.type == 'VARIABLE' and t.val not in mapping]
remaining_actual = [var for var in variables if var not in mapping.values()]
while len(remaining_actual) < len(remaining_formal):
remaining_actual.append('New'+str(new_index))
new_index += 1
for i, formal_name in enumerate(remaining_formal):
mapping[formal_name] = remaining_actual[i]
return [t if t.type != 'VARIABLE' else t.clone(val=mapping[t.val]) for t in b]
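# For instance, if a holds the single variable A0, b holds A0 and A1, and the
# matching token in [tokens] is X, then A0 maps to X; the new variable A1
# gets an unused name from [variables] or, failing that, New0, New1, ….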
# Basic sanity check.
if __name__ == '__main__':
    var_names = {}
    before = rename_vars(tokenize("dup([A0|A1], [A2|A3])"), var_names)
    after = rename_vars(tokenize("dup([A0|A1], [A5, A4|A3])"), var_names)
    # [lines] and [code] were previously undefined here; use an illustrative
    # line of student code so the check actually runs.
    code = "dup([X|Xs], [Y|Ys])"
    line = tokenize(code)
    variables = [t.val for t in line if t.type == 'VARIABLE']
    mapped = map_vars(before, after, line, variables)
    print(stringify(mapped))