author    Timotej Lazar <timotej.lazar@araneo.org>  2015-02-04 18:47:07 +0100
committer Aleš Smodiš <aless@guru.si>               2015-08-11 14:26:01 +0200
commit    001739a6a93cceeb29f81ea2281ade0bef1a8645 (patch)
tree      814a0890841ab55799329c76452ba25ce693caca
parent    6a104bf8e2baea162d7f9f1d439dd8f671ddd413 (diff)
Move monkey.prolog to root module
-rw-r--r--  monkey/edits.py                                            2
-rwxr-xr-x  monkey/monkey.py                                           6
-rwxr-xr-x  monkey/test.py                                             4
-rw-r--r--  monkey/util.py                                            27
-rw-r--r--  prolog/engine.py (renamed from monkey/prolog/engine.py)    0
-rw-r--r--  prolog/lexer.py (renamed from monkey/prolog/lexer.py)      0
-rw-r--r--  prolog/util.py (renamed from monkey/prolog/util.py)       29
7 files changed, 34 insertions(+), 34 deletions(-)
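The diffstat shows prolog being promoted from a subpackage of monkey to a top-level package, so the monkey modules below switch from relative to absolute imports. A minimal sketch of the difference, assuming the repository root is on sys.path (e.g. the tools are run from the checkout root):

# Hypothetical illustration, not part of the commit.
# Before the move, prolog was imported relative to the monkey package:
#   from .prolog.util import rename_vars, stringify, tokenize
# After the move, it is a sibling top-level package, imported absolutely:
from prolog.util import rename_vars, stringify, tokenize

# Absolute imports resolve via sys.path, so the scripts would be run from
# the repository root, e.g.:  python3 -m monkey.test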
diff --git a/monkey/edits.py b/monkey/edits.py
index 3e0ae08..a614e4e 100644
--- a/monkey/edits.py
+++ b/monkey/edits.py
@@ -4,7 +4,7 @@ import collections
from .action import expand, parse
from .graph import Node
-from .prolog.util import rename_vars, stringify, tokenize
+from prolog.util import rename_vars, stringify, tokenize
from .util import get_line
# A line edit is a contiguous sequence of actions within a single line. This
diff --git a/monkey/monkey.py b/monkey/monkey.py
index 47bca5c..962f25d 100755
--- a/monkey/monkey.py
+++ b/monkey/monkey.py
@@ -4,9 +4,9 @@ import math
import time
from .edits import classify_edits
-from .prolog.engine import test
-from .prolog.util import compose, decompose, map_vars, rename_vars, stringify
-from .util import PQueue, Token
+from prolog.engine import test
+from prolog.util import Token, compose, decompose, map_vars, rename_vars, stringify
+from .util import PQueue
# Starting from [code], find a sequence of [edits] that transforms it into a
# correct predicate for [name]. Append [aux_code] when testing (available facts
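The truncated comment describes fix() as a search: starting from a student's code, explore sequences of edits until a candidate passes the tests for [name]. A generic, self-contained sketch of such a best-first loop, using heapq directly; this is a hypothetical stand-in, not the implementation in monkey/monkey.py:

import heapq

# Hypothetical sketch of the search described above. goal_test and
# propose_edits stand in for prolog.engine.test and the edit machinery.
def best_first_fix(code, goal_test, propose_edits):
    queue = [(0, 0, code, [])]        # (cost, tiebreak, candidate, edits)
    counter = 1                       # tiebreak keeps heap entries comparable
    seen = {code}
    while queue:
        cost, _, candidate, path = heapq.heappop(queue)
        if goal_test(candidate):      # e.g. test(name, candidate + aux_code)
            return path
        for edit_cost, new_code, edit in propose_edits(candidate):
            if new_code not in seen:  # skip programs already queued
                seen.add(new_code)
                counter += 1
                heapq.heappush(queue, (cost + edit_cost, counter,
                                       new_code, path + [edit]))
    return None                       # search exhausted, no fix found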
diff --git a/monkey/test.py b/monkey/test.py
index 4bf5db9..0bb047e 100755
--- a/monkey/test.py
+++ b/monkey/test.py
@@ -11,8 +11,8 @@ from .action import parse
from .edits import classify_edits, edit_graph, get_edits_from_traces
from .graph import graphviz
from .monkey import fix
-from .prolog.engine import test
-from .prolog.util import compose, decompose, stringify
+from prolog.engine import test
+from prolog.util import compose, decompose, stringify
from .util import indent
# Load django models.
diff --git a/monkey/util.py b/monkey/util.py
index b8be2bb..6d57d29 100644
--- a/monkey/util.py
+++ b/monkey/util.py
@@ -1,6 +1,5 @@
#!/usr/bin/python3
-from collections import namedtuple
from heapq import heappush, heappop
import itertools
@@ -43,32 +42,6 @@ class PQueue(object):
def __len__(self):
return self.size
-# Stores a token's type and value, and optionally the position of the first
-# character in the lexed stream.
-class Token(namedtuple('Token', ['type', 'val', 'pos'])):
- __slots__ = ()
-
- # Custom constructor to support default parameters.
- def __new__(cls, type, val='', pos=None):
- return super(Token, cls).__new__(cls, type, val, pos)
-
- def __str__(self):
- return self.val
-
- # Ignore position when comparing tokens. There is probably a cleaner way of
- # doing these.
- __eq__ = lambda x, y: x[0] == y[0] and x[1] == y[1]
- __ne__ = lambda x, y: x[0] != y[0] or x[1] != y[1]
- __lt__ = lambda x, y: tuple.__lt__(x[0:2], y[0:2])
- __le__ = lambda x, y: tuple.__le__(x[0:2], y[0:2])
- __ge__ = lambda x, y: tuple.__ge__(x[0:2], y[0:2])
- __gt__ = lambda x, y: tuple.__gt__(x[0:2], y[0:2])
-
- # Only hash token's value (we don't care about position, and types are
- # determined by values).
- def __hash__(self):
- return hash(self[1])
-
# Return [n]th line in [text].
def get_line(text, n):
lines = text.split('\n')
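The hunk ends with get_line truncated by the diff context. A hypothetical usage sketch, assuming zero-based indexing to match the list produced by text.split('\n'):

# Hypothetical usage; zero-based line numbering is an assumption.
program = 'a(X) :-\n    b(X).\nb(1).'
get_line(program, 2)   # -> 'b(1).' under that assumption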
diff --git a/monkey/prolog/engine.py b/prolog/engine.py
index af79535..af79535 100644
--- a/monkey/prolog/engine.py
+++ b/prolog/engine.py
diff --git a/monkey/prolog/lexer.py b/prolog/lexer.py
index 971e8a6..971e8a6 100644
--- a/monkey/prolog/lexer.py
+++ b/prolog/lexer.py
diff --git a/monkey/prolog/util.py b/prolog/util.py
index 8d8b266..7fb81e3 100644
--- a/monkey/prolog/util.py
+++ b/prolog/util.py
@@ -1,7 +1,34 @@
#!/usr/bin/python3
+from collections import namedtuple
+
from .lexer import lexer, operators
-from ..util import Token
+
+# Stores a token's type and value, and optionally the position of the first
+# character in the lexed stream.
+class Token(namedtuple('Token', ['type', 'val', 'pos'])):
+ __slots__ = ()
+
+ # Custom constructor to support default parameters.
+ def __new__(cls, type, val='', pos=None):
+ return super(Token, cls).__new__(cls, type, val, pos)
+
+ def __str__(self):
+ return self.val
+
+ # Ignore position when comparing tokens. There is probably a cleaner way of
+ # doing these.
+ __eq__ = lambda x, y: x[0] == y[0] and x[1] == y[1]
+ __ne__ = lambda x, y: x[0] != y[0] or x[1] != y[1]
+ __lt__ = lambda x, y: tuple.__lt__(x[0:2], y[0:2])
+ __le__ = lambda x, y: tuple.__le__(x[0:2], y[0:2])
+ __ge__ = lambda x, y: tuple.__ge__(x[0:2], y[0:2])
+ __gt__ = lambda x, y: tuple.__gt__(x[0:2], y[0:2])
+
+ # Only hash token's value (we don't care about position, and types are
+ # determined by values).
+ def __hash__(self):
+ return hash(self[1])
# Return a list of tokens in [text].
def tokenize(text):
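The comparison and hashing overrides above make a token's position irrelevant to its identity. A small usage sketch, hypothetical and not part of the commit:

# Tokens with the same type and value compare and hash equal,
# no matter where in the stream they were lexed:
t1 = Token('NAME', 'foo', pos=3)
t2 = Token('NAME', 'foo', pos=17)
assert t1 == t2 and hash(t1) == hash(t2)
assert str(t1) == 'foo'
# Ordering compares the (type, value) prefix only:
assert Token('NAME', 'a') < Token('NAME', 'b')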