diff options
-rw-r--r-- | python/util.py | 30 |
1 file changed, 30 insertions, 0 deletions
diff --git a/python/util.py b/python/util.py
index 5cde278..f04fcc2 100644
--- a/python/util.py
+++ b/python/util.py
@@ -19,6 +19,7 @@
 import io
 import re
 from tokenize import tokenize, TokenError
+import ast
 
 def get_tokens(code):
     """ Gets a list of tokens. """
@@ -28,6 +29,13 @@ def get_tokens(code):
     except TokenError:
         return []
 
+def all_tokens(code):
+    try:
+        stream = io.BytesIO(code.encode('utf-8'))
+        return [t for t in tokenize(stream.readline)]
+    except TokenError:
+        return []
+
 # Check if tokens contain a sequence of tokens (given as a list of strings).
 def has_token_sequence(tokens, sequence):
     for i in range(len(tokens)-len(sequence)+1):
@@ -85,6 +93,28 @@ def get_exception_desc(exc):
         return [{'id':'error', 'args': {'message': exc}}]
     return None
 
+def get_ast(code):
+    """
+    Turn code into ast; use it when regular expressions on strings
+    are not enjoyable enough.
+
+    """
+    return ast.parse(code)
+
+def has_comprehension(tree):
+    """ Searches code for comprehensions and generators. """
+    for n in ast.walk(tree):
+        if isinstance(n, ast.comprehension):
+            return True
+    return False
+
+def has_loop(tree):
+    """ Searches abstract syntax tree for loops (for and while). """
+    for n in ast.walk(tree):
+        if isinstance(n, ast.For) or isinstance(n, ast.While):
+            return True
+    return False
+
 if __name__ == '__main__':
     print(has_token_sequence(get_tokens('x + y >= 0'), ['>=', '0']))
     print(has_token_sequence(get_tokens('x + y > 0'), ['>=', '0']))