#!/usr/bin/python3
# coding=utf-8

# CodeQ: an online programming tutor.
# Copyright (C) 2015 UL FRI
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""This tool processes all problem files and outputs JSON files describing the
problems, to be used as static web resources. It also adds missing problem
identifiers to the database.

Before running the script define the following environment variables, if the
defaults are not okay:
  CODEQ_WEB_OUTPUT  - directory where to write the output, defaults to /var/www/html/data
  CODEQ_PROBLEMS    - directory where you have codeq-problems checked out, defaults to /var/local/codeq-problems
  CODEQ_DB_HOST     - hostname or IP of the database server, defaults to localhost
  CODEQ_DB_PORT     - TCP port number of the database server, defaults to 5432
  CODEQ_DB_DATABASE - name of the database, defaults to codeq
  CODEQ_DB_USER     - database username, defaults to codeq
  CODEQ_DB_PASS     - database password, defaults to c0d3q
"""

import json
import os
import shutil
import sys
import traceback

# insert the parent directory, so the problem scripts find their modules
sys.path.insert(0, os.sep.join(os.path.dirname(__file__).split(os.sep)[:-1]))

import server.problems
import db

problems_path = os.environ.get('CODEQ_PROBLEMS') or '/var/local/codeq-problems'  # where to find problems, the same as server.problems._path_prefix
output_path = os.environ.get('CODEQ_WEB_OUTPUT') or '/var/www/html/data'  # the base directory where to create subdirectories and output the files for web
toplevel = {'style.css'}  # files to copy from the top-level directory (CODEQ_PROBLEMS)
translations = {'sl', 'en'}  # translations to seek (sl.py, en.py, ...)
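# Example invocation using the environment variables documented above (the
# script name and all values below are placeholders, not actual deployment
# settings):
#
#   CODEQ_PROBLEMS=/home/codeq/codeq-problems \
#   CODEQ_WEB_OUTPUT=/var/www/html/data \
#   CODEQ_DB_HOST=localhost CODEQ_DB_PASS=secret \
#   python3 prepare_problems.py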
# default values (properties) for various types of items, also the list of properties to copy from modules
language_props = {  # for translation files inside the language subdirectory
    'name': 'Name not set',
    'description': 'Description not set',
    'hint': {}
}
language_common_props = {  # for common.py inside the language subdirectory
    'id': None,       # database ID of the language
    'hint_type': {}   # type definitions of common hints
}
group_props = {  # for translation files inside the problem group subdirectory
    'name': 'Name not set',
    'description': 'Description not set'
}
group_common_props = {  # for common.py inside the problem group subdirectory
    'id': None,   # database ID of the problem group
    'number': 1   # display index of the problem group
}
problem_props = {  # for translation files inside the problem subdirectory
    'name': 'Name not set',
    'slug': 'Slug not set',
    'description': 'Description not set',
    'plan': [],
    'hint': {}
}
problem_common_props = {  # for common.py inside the problem subdirectory
    'id': None,       # database ID of the problem
    'number': 1,      # display index of problems inside their groups
    'visible': True,  # whether the problem is enabled (disabled problems are excluded from the application)
    'initial': None,  # code to put in the editor when the user first opens this problem
    'hint_type': {},  # type definitions of problem hints
}

if not os.path.exists(output_path):
    os.mkdir(output_path)


def load_translation_data(package, defaults):
    result = {}
    path = os.sep.join(package.split('.'))
    for lang in translations:
        mod_path = os.path.join(problems_path, path, lang + '.py')
        if os.path.exists(mod_path) and os.path.isfile(mod_path):
            mod = None
            try:
                mod = server.problems.load_module(package + '.' + lang)
            except:
                traceback.print_exc(limit=0)
            lang_data = {}
            result[lang] = lang_data
            if mod is None:
                print('Could not load translation module {}'.format(package + '.' + lang))
                for prop, default in defaults.items():
                    lang_data[prop] = default
            else:
                for prop, default in defaults.items():
                    lang_data[prop] = getattr(mod, prop, default)
    return result
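# For illustration: a translation module (e.g. <language_identifier>/sl.py; the
# identifier and values below are hypothetical) only needs to define module-level
# attributes matching the keys of the *_props dicts above, for example:
#
#   name = 'Example name'
#   description = 'A short description.'
#   hint = {}
#
# Attributes the module does not define fall back to the defaults passed to
# load_translation_data.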
def load_common_data(package, defaults):
    mod = None
    path = os.sep.join(package.split('.'))
    mod_path = os.path.join(problems_path, path, 'common.py')
    if os.path.exists(mod_path) and os.path.isfile(mod_path):
        try:
            mod = server.problems.load_module(package + '.common')
        except:
            print('Error loading module {}:'.format(mod_path))
            traceback.print_exc(limit=0)
    if mod is None:
        return None
    result = {}
    for prop, default in defaults.items():
        result[prop] = getattr(mod, prop, default)
    return result


def process_hint_type(hint_type):
    result = {}
    for identifier, hint in hint_type.items():
        result[identifier] = hint.hint_type
    return result


resource_tree = {}  # a directory tree where branches are paths and leaves are filenames, it is rooted at the web data directory


def copy_web_resources(package, dst_dir_fragments, filter=None):
    src_path = os.path.join(problems_path, os.sep.join(package.split('.')))
    dst_path = output_path
    node = resource_tree
    for fragment in dst_dir_fragments:
        dst_path = os.path.join(dst_path, fragment)
        subnode = node.get(fragment)
        if subnode is None:
            subnode = {}
            node[fragment] = subnode
        node = subnode
    for filename in os.listdir(src_path):
        if filter and filename not in filter:
            continue
        if filename.startswith('.') or filename.endswith('.py'):
            continue
        full_filename = os.path.join(src_path, filename)
        if os.path.isfile(full_filename):
            shutil.copy(full_filename, dst_path)
            node[filename] = True


def dump_language_defs(data, output_path):
    # sort groups and problems
    groups = data['groups']
    for group in groups:
        group['problems'].sort(key=lambda p: p.get('number', 0))
    groups.sort(key=lambda p: p.get('number', 0))
    # write out the JSON file
    with open(os.path.join(output_path, 'language.json'), 'w') as f:
        json.dump(data, f, indent=2)


conn = db.get_connection()
cur = conn.cursor()


def db_add(table, id, data):
    data = sorted(data.items())
    cols = tuple([d[0] for d in data])
    vals = tuple([d[1] for d in data])
    cur.execute('select ' + ','.join(cols) + ' from ' + table + ' where id = %s', (id,))
    row = cur.fetchone()
    if row is None:
        print('Inserting new {} in database: cols={} vals={}'.format(table, cols, vals))
        args = ','.join(['%s'] * len(cols))
        sql = 'insert into ' + table + ' (' + ','.join(cols) + ') values (' + args + ')'
        cur.execute(sql, vals)
    elif row != vals:
        print('Updating {} {} in database: cols={} vals={}'.format(table, id, cols, vals))
        args = ','.join([col + ' = %s' for col in cols])
        sql = 'update ' + table + ' set ' + args + ' where id = %s'
        cur.execute(sql, vals + (id,))
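# db_add is a minimal upsert helper; for example (hypothetical values),
#
#   db_add('language', 1, {'id': 1, 'identifier': 'prolog'})
#
# inserts the row when no row with id=1 exists in the language table, updates
# the listed columns when the stored values differ, and does nothing otherwise.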
try:  # copy top-level files (style etc.)
    copy_web_resources('', [], filter=toplevel)

    # get problem descriptors
    for lang_identifier in os.listdir(problems_path):
        lang_data = load_common_data(lang_identifier, language_common_props)
        if lang_data is None:
            continue

        # create language output directory
        lang_output_path = os.path.join(output_path, lang_identifier)
        if not os.path.exists(lang_output_path):
            os.mkdir(lang_output_path)

        lang_data['hint_type'] = process_hint_type(lang_data.get('hint_type', {}))
        lang_data['identifier'] = lang_identifier
        lang_data['groups'] = []
        lang_data['translations'] = load_translation_data(lang_identifier, language_props)
        copy_web_resources(lang_identifier, [lang_identifier])
        db_add('language', lang_data['id'], {'id': lang_data['id'], 'identifier': lang_identifier})

        groups_path = os.path.join(problems_path, lang_identifier, 'problems')
        for group_identifier in os.listdir(groups_path):
            group_package = lang_identifier + '.problems.' + group_identifier
            group_data = load_common_data(group_package, group_common_props)
            if group_data is None:
                continue

            # create group directory
            group_output_path = os.path.join(lang_output_path, group_identifier)
            if not os.path.exists(group_output_path):
                os.mkdir(group_output_path)

            group_data['identifier'] = group_identifier
            group_data['problems'] = []
            group_data['translations'] = load_translation_data(group_package, group_props)
            db_add('problem_group', group_data['id'], {'id': group_data['id'], 'identifier': group_identifier})

            group_path = os.path.join(groups_path, group_identifier)
            for problem_identifier in os.listdir(group_path):
                problem_package = group_package + '.' + problem_identifier
                common_data = load_common_data(problem_package, problem_common_props)
                if common_data is None:
                    continue
                common_data['identifier'] = problem_identifier
                if not common_data['visible']:
                    continue  # problem is not visible, do not generate anything
                del common_data['visible']  # we don't need this field in the GUI

                # save for later, to be used in problem_data below
                hint_type = process_hint_type(common_data['hint_type'])
                del common_data['hint_type']  # we don't need this field in the language index

                group_data['problems'].append(common_data)

                # load translations, and copy only problem names to the common data
                problem_translations = load_translation_data(problem_package, problem_props)
                name_translations = {}
                for key, value in problem_translations.items():
                    name_translations[key] = {'name': value.get('name')}
                common_data['translations'] = name_translations

                problem_data = {
                    'id': common_data['id'],
                    'identifier': problem_identifier,
                    'translations': problem_translations,
                    'initial': common_data['initial'],
                    'hint_type': hint_type,
                }
                problem_output_path = os.path.join(group_output_path, problem_identifier)
                if not os.path.exists(problem_output_path):
                    os.mkdir(problem_output_path)
                with open(os.path.join(problem_output_path, 'problem.json'), 'w') as f:
                    json.dump(problem_data, f, indent=2)
                copy_web_resources(problem_package, [lang_identifier, group_identifier, problem_identifier])
                db_add('problem', problem_data['id'], {
                    'id': problem_data['id'],
                    'language_id': lang_data['id'],
                    'problem_group_id': group_data['id'],
                    'identifier': problem_identifier
                })

            # add only non-empty problem groups
            if group_data['problems']:
                copy_web_resources(group_package, [lang_identifier, group_identifier])
                lang_data['groups'].append(group_data)

        dump_language_defs(lang_data, lang_output_path)

    cur.close()
    conn.commit()
except:
    conn.rollback()
    raise
finally:
    db.return_connection(conn)
# dump the tree of resources
with open(os.path.join(output_path, 'resources.json'), 'w') as f:
    json.dump(resource_tree, f, indent=2)
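# For orientation, the generated layout under CODEQ_WEB_OUTPUT looks roughly
# like this (the 'prolog' identifier is only an example):
#
#   <output_path>/
#       style.css
#       resources.json      # the resource tree dumped above
#       prolog/
#           language.json   # written by dump_language_defs
#           <group>/<problem>/problem.json
#           ...plus any static files copied by copy_web_resources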