-rwxr-xr-x  monkey/monkey.py  | 11
-rwxr-xr-x  monkey/test.py    |  3
-rw-r--r--  prolog/engine.py  | 39
3 files changed, 11 insertions, 42 deletions
diff --git a/monkey/monkey.py b/monkey/monkey.py
index fd11868..f64a142 100755
--- a/monkey/monkey.py
+++ b/monkey/monkey.py
@@ -3,11 +3,20 @@
 import math
 import time
 
+import prolog.engine
 from .edits import classify_edits
-from prolog.engine import test
 from prolog.util import Token, annotate, compose, map_vars, normalized, rename_vars, stringify
 from .util import PQueue
 
+# Check whether all tests for problem [name] succeed.
+def test(name, code):
+    try:
+        reply = prolog.engine.create_and_ask(
+            code=code, query='run_tests({})'.format(name))
+        return reply.get('event') == 'success'
+    except Exception as ex:
+        return False
+
 # Starting from [code], find a sequence of edits that transforms it into a
 # correct predicate for [name]. Append [aux_code] when testing (available facts
 # and predicates).
diff --git a/monkey/test.py b/monkey/test.py
index 1909f8d..6887a2c 100755
--- a/monkey/test.py
+++ b/monkey/test.py
@@ -9,8 +9,7 @@
 from termcolor import colored
 
 from .edits import classify_edits, trace_graph
 from .graph import graphviz
-from .monkey import fix, fix_hints
-from prolog.engine import test
+from .monkey import fix, fix_hints, test
 from prolog.util import annotate, compose, stringify
 from .util import indent
diff --git a/prolog/engine.py b/prolog/engine.py
index 048190a..6215bb2 100644
--- a/prolog/engine.py
+++ b/prolog/engine.py
@@ -105,45 +105,6 @@ def get_message(reply):
     message = re.sub(r'_G[0-9]*', '_', message)
     return reply['message'], message
 
-# Test whether [code] is a correct solution for problem [name]. Runs all tests
-# and returns a list of results. Raises an exception on error.
-def test_all(name, code):
-    reply = create_and_ask(code=code, query="run_tests({}, Results)".format(name))
-
-    if reply.get('event') != 'success':
-        raise Exception('testing procedure failed')
-
-    results = re.findall(r'(?:success|failure)\([^)]*\)', reply['data'][0]['Results'])
-    n_total = len(results)
-    n_passed = len([r for r in results if r.startswith('success')])
-    return n_passed, n_total
-
-# Test whether [code] is a correct solution for problem [name]. Returns a bool
-# and stops on first failure.
-def test(name, code):
-    try:
-        reply = create_and_ask(code=code, query='run_tests({})'.format(name))
-        return reply.get('event') == 'success'
-    except Exception as ex:
-        return False
-
-# Try to generate a random test case for problem [name] with [solution] that
-# fails for [code]. Give up after [tries] attempts.
-def create_failing_test(name, solution, code, tries=10):
-    try:
-        for i in range(tries):
-            reply = create_and_ask(code=solution,
-                query='create_test({}, Test)'.format(name))
-            testcase = reply['data'][0]['Test']
-            reply = create_and_ask(code=code,
-                query='run_test({}, Result, 0.1)'.format(testcase))
-            result = reply['data'][0]['Result']
-            if not result.startswith('success'):
-                return testcase
-    except Exception as ex:
-        pass
-    return None
-
 # Basic sanity check.
 if __name__ == '__main__':
     engine = create(code='dup([],[]). dup([H|T],[H,H|TT]) :- dup(T,TT).')
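
Note on the relocated helper: test/2 deliberately swallows any engine exception and reports it as a plain failure, so the search loop in fix() treats a crashed or malformed query the same as a failed test. A minimal usage sketch follows; the problem name 'dup' and its run_tests/1 test suite are hypothetical, and a pengine server must be reachable through prolog.engine for this to run.

    # Usage sketch for the relocated monkey.test helper (assumptions above).
    from monkey.monkey import test

    # The dup/2 program mirrors the sanity check at the bottom of prolog/engine.py.
    program = 'dup([],[]). dup([H|T],[H,H|TT]) :- dup(T,TT).'

    # True only if run_tests(dup) succeeds on a fresh engine; any error
    # (unreachable server, syntax error in the program) comes back as False.
    if test('dup', program):
        print('all tests passed')
    else:
        print('a test failed or the engine raised an error')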