From 347dd54b6dc63f8f3fd5b0032a09ed459d11f527 Mon Sep 17 00:00:00 2001 From: Timotej Lazar Date: Thu, 8 Oct 2015 16:10:19 +0200 Subject: Get problems from files in build_web_resources The script now gets the list of languages, groups and problems from the filesystem and inserts missing IDs in the database. --- scripts/add_problem.py | 85 --------------- scripts/build_web_resources.py | 229 ++++++++++++++++++++++------------------- 2 files changed, 122 insertions(+), 192 deletions(-) delete mode 100755 scripts/add_problem.py (limited to 'scripts') diff --git a/scripts/add_problem.py b/scripts/add_problem.py deleted file mode 100755 index d8462cf..0000000 --- a/scripts/add_problem.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/python3 -# coding=utf-8 - -import db -from .utils import filenamefy - -conn = db.get_connection() -try: - cur = conn.cursor() - try: - new_lidentifier = None - # Get or add language. - cur.execute('select id, name, identifier from language order by id asc') - languages = cur.fetchall() - print('Languages:') - for lid, lname, lidentifier in languages: - print(' {}: {}'.format(lid, lname)) - - new_lid = input("Enter language ID or 'n' for new): ") - if new_lid == 'n': - new_lname = input('Enter name of the new language: ') - new_lidentifier = filenamefy(new_lname) - cur.execute('insert into language (name, identifier) values (%s, %s) returning id', - (new_lname, new_lidentifier)) - new_lid = cur.fetchone()[0] - print('Added new language "{}" with ID {} and identifier {}'.format( - new_lname, new_lid, new_lidentifier)) - else: - new_lid = int(new_lid) - for lid, lname, lidentifier in languages: - print(lid, lname, lidentifier) - if lid == new_lid: - new_lidentifier = lidentifier - break - if new_lidentifier is None: - raise Exception('Language with ID {} does not exist'.format(new_lid)) - print('Selected langauge {}'.format(new_lid)) - print() - - # Get or add problem group. - new_gidentifier = None - cur.execute('select id, name, identifier from problem_group order by id asc') - groups = cur.fetchall() - print('Problem groups:') - for gid, gname, gidentifier in groups: - print(' {}: {}'.format(gid, gname)) - new_gid = input("Enter problem group ID or 'n' for new): ") - if new_gid == 'n': - new_gname = input('Enter name of the new problem group: ') - new_gidentifier = filenamefy(new_gname) - cur.execute('insert into problem_group (name, identifier) values (%s, %s) returning id', - (new_gname, new_gidentifier)) - new_gid = cur.fetchone()[0] - print('Added new problem group "{}" with ID {} and identifier {}'.format( - new_gname, new_gid, new_gidentifier)) - else: - new_gid = int(new_gid) - for gid, gname, gidentifier in groups: - if gid == new_gid: - new_gidentifier = gidentifier - break - if new_gidentifier is None: - raise Exception('Group with ID {} does not exist'.format(new_gid)) - print('Selected problem group {}'.format(new_gid)) - print() - - # Add problem. 
- new_pname = input('Enter name of the new problem: ') - new_pidentifier = filenamefy(new_pname) - cur.execute('insert into problem (language_id, problem_group_id, name, identifier, is_visible) values (%s, %s, %s, %s, %s) returning id', - (new_lid, new_gid, new_pname, new_pidentifier, True)) - new_pid = cur.fetchone()[0] - print('Added new problem "{}" with ID {} and identifier {}'.format( - new_pname, new_pid, new_pidentifier)) - print('Data files should be placed in "{}/problems/{}/{}"'.format( - new_lidentifier, new_gidentifier, new_pidentifier)) - - finally: - cur.close() - conn.commit() -except: - conn.rollback() - raise -finally: - db.return_connection(conn) diff --git a/scripts/build_web_resources.py b/scripts/build_web_resources.py index 9a4ccec..e581fd0 100644 --- a/scripts/build_web_resources.py +++ b/scripts/build_web_resources.py @@ -1,8 +1,9 @@ #!/usr/bin/python3 # coding=utf-8 -"""This tool processes all problem files and database data, and outputs JSON -files to describe problems, to be used as static web resources. +"""This tool processes all problem files, and outputs JSON files to describe +problems, to be used as static web resources. It also adds missing problem +identifiers in the database. Before running the script define the following environment variables, if defaults are not okay: CODEQ_WEB_OUTPUT - directory where to write the output, defaults to /var/www/html/data CODEQ_PROBLEMS - directory where you have codeq-problems checked out, defaults to /var/local/codeq-problems @@ -37,6 +38,7 @@ language_props = { # for translation files inside the language subdirectory 'hint': {} } language_common_props = { # for common.py inside the language subdirectory + 'id': None, # database ID of the language 'hint_type': {} # type definitions of common hints } group_props = { # for translation files inside the problem group subdirectory @@ -44,6 +46,7 @@ group_props = { # for translation files inside the problem group subdirectory 'description': 'Description not set' } group_common_props = { # for common.py inside the problem group subdirectory + 'id': None, # database ID of the problem group 'number': 1 # display index of the problem group } problem_props = { # for translation files inside the problem subdirectory @@ -54,6 +57,7 @@ problem_props = { # for translation files inside the problem subdirectory 'hint': {} } problem_common_props = { # for common.py inside the problem subdirectory + 'id': None, # database ID of the problem 'number': 1, # display index of problems inside their groups 'visible': True, # whether the problem is enabled (disabled problems are excluded from the application) 'hint_type': {} # type definitions of problem hints @@ -65,9 +69,6 @@ groups = {} # problem groups, info from database if not os.path.exists(output_path): os.mkdir(output_path) -conn = db.get_connection() -cur = conn.cursor() - def load_translation_data(package, defaults): result = {} path = os.sep.join(package.split('.')) @@ -138,121 +139,135 @@ def copy_web_resources(package, dst_dir_fragments): shutil.copy(full_filename, dst_path) node[filename] = True -cur.execute('select id, name, identifier from language') -row = cur.fetchone() -while row: - languages[row[0]] = {'id': row[0], 'name': row[1], 'identifier': row[2]} - row = cur.fetchone() - -cur.execute('select id, name, identifier from problem_group') -row = cur.fetchone() -while row: - groups[row[0]] = {'id': row[0], 'name': row[1], 'identifier': row[2]} - row = cur.fetchone() - -cur.execute('select id, language_id, problem_group_id, 
identifier from problem where is_visible = true order by language_id, problem_group_id') -previous_language_id = None -previous_group_id = None -lang_output_path = None -lang_index = None -row = cur.fetchone() - -def dump_language_defs(): - if lang_index: +def dump_language_defs(data, output_path): + if data: # sort groups and problems - groups = lang_index['groups'] + groups = data['groups'] for group in groups: group['problems'].sort(key=lambda p: p.get('number', 0)) groups.sort(key=lambda p: p.get('number', 0)) # write out the JSON file - with open(os.path.join(lang_output_path, 'language.json'), 'w') as f: - json.dump(lang_index, f, indent=2) - -while row: - # process language data, it all goes into the language directory language.json - try: - language_id = row[1] - if previous_language_id != language_id: - language = languages[language_id] - lang_identifier = language['identifier'] - language_path = os.path.join(problems_path, lang_identifier) - if not (os.path.exists(language_path) and os.path.isdir(language_path)): - print('ERROR: the directory for language {0} does not exist: {1}'.format(lang_identifier, language_path)) - continue - dump_language_defs() # dump the previous language index - lang_output_path = os.path.join(output_path, lang_identifier) - if not os.path.exists(lang_output_path): - os.mkdir(lang_output_path) - previous_language_id = language_id - problem_groups_list = [] - lang_index = load_common_data(lang_identifier, language_common_props) - lang_index['hint_type'] = process_hint_type(lang_index.get('hint_type', {})) # process type definitions for common hints - lang_index['id'] = language_id - lang_index['identifier'] = lang_identifier - lang_index['groups'] = problem_groups_list - lang_index['translations'] = load_translation_data(lang_identifier, language_props) - previous_group_id = None - copy_web_resources(lang_identifier, [lang_identifier]) - - # process problem group data, it all goes into the language directory language.json - group_id = row[2] - if previous_group_id != group_id: - group = groups[group_id] - group_identifier = group['identifier'] - group_path = os.path.join(language_path, 'problems', group_identifier) - if not (os.path.exists(group_path) and os.path.isdir(group_path)): - print('ERROR: the directory for group {0}/{1} does not exist: {2}'.format(lang_identifier, group_identifier, group_path)) + with open(os.path.join(output_path, 'language.json'), 'w') as f: + json.dump(data, f, indent=2) + +conn = db.get_connection() +cur = conn.cursor() + +def db_add(table, identifier, data): + cur.execute('select id from ' + table + ' where identifier = %s', (identifier,)) + row = cur.fetchone() + if row is None: + data = sorted(data.items()) + cols = [d[0] for d in data] + vals = [d[1] for d in data] + print('Inserting new {} in database: cols={} vals={}'.format(table, cols, vals)) + + args = ','.join(['%s'] * len(cols)) + sql = 'insert into ' + table + ' (' + ','.join(cols) + ') values (' + args + ')' + cur.execute(sql, vals) + +try: + # get problem descriptors + for lang_identifier in os.listdir(problems_path): + lang_path = os.path.join(problems_path, lang_identifier) + if (not os.path.exists(os.path.join(lang_path, 'common.py')) or + not os.path.exists(os.path.join(lang_path, 'problems'))): + continue + + # create language output directory + lang_output_path = os.path.join(output_path, lang_identifier) + if not os.path.exists(lang_output_path): + os.mkdir(lang_output_path) + + lang_data = load_common_data(lang_identifier, language_common_props) + 
lang_data['hint_type'] = process_hint_type(lang_data.get('hint_type', {})) + lang_data['identifier'] = lang_identifier + lang_data['groups'] = [] + lang_data['translations'] = load_translation_data(lang_identifier, language_props) + copy_web_resources(lang_identifier, [lang_identifier]) + db_add('language', lang_identifier, + {'id': lang_data['id'], 'name': lang_identifier, 'identifier': lang_identifier}) + + groups_path = os.path.join(lang_path, 'problems') + for group_identifier in os.listdir(groups_path): + group_path = os.path.join(groups_path, group_identifier) + if not os.path.exists(os.path.join(group_path, 'common.py')): continue + + # create group directory group_output_path = os.path.join(lang_output_path, group_identifier) if not os.path.exists(group_output_path): os.mkdir(group_output_path) + group_package = lang_identifier + '.problems.' + group_identifier - previous_group_id = group_id - problems_list = [] group_data = load_common_data(group_package, group_common_props) - group_data['id'] = group_id group_data['identifier'] = group_identifier - group_data['problems'] = problems_list + group_data['problems'] = [] group_data['translations'] = load_translation_data(group_package, group_props) - problem_groups_list.append(group_data) - copy_web_resources(group_package, [lang_identifier, group_identifier]) - - # process problem data, from common.py goes into the language directory language.json, others go into problem subdirectory's problem.json - problem_id = row[0] - problem_identifier = row[3] - problem_path = os.path.join(group_path, problem_identifier) - if not (os.path.exists(problem_path) and os.path.isdir(problem_path)): - print('ERROR: the directory for problem {0}/{1}/{2} does not exist: {3}'.format(lang_identifier, group_identifier, problem_identifier, group_path)) - continue - problem_package = group_package + '.' 
+ problem_identifier - # load common data, for the language directory - common_data = load_common_data(problem_package, problem_common_props) - if not common_data['visible']: - continue # problem is not visible, do not generate anything - del common_data['visible'] # we don't need this field in the GUI - hint_type = process_hint_type(common_data['hint_type']) # save for later, to be used in problem_data below - del common_data['hint_type'] # we don't need this field in the language index - common_data['id'] = problem_id - common_data['identifier'] = problem_identifier - problems_list.append(common_data) - # load translations, and copy only problem names to the common data - problem_translations = load_translation_data(problem_package, problem_props) - name_translations = {} - for key, value in problem_translations.items(): - name_translations[key] = {'name': value.get('name')} - common_data['translations'] = name_translations - # dump translations, for the problem subdirectory's problem.json - problem_data = {'id': problem_id, 'identifier': problem_identifier, 'translations': problem_translations, 'hint_type': hint_type} - problem_output_path = os.path.join(group_output_path, problem_identifier) - if not os.path.exists(problem_output_path): - os.mkdir(problem_output_path) - with open(os.path.join(problem_output_path, 'problem.json'), 'w') as f: - json.dump(problem_data, f, indent=2) - copy_web_resources(problem_package, [lang_identifier, group_identifier, problem_identifier]) - finally: - row = cur.fetchone() - -dump_language_defs() # dump the last language index + db_add('problem_group', group_identifier, + {'id': group_data['id'], 'name': group_identifier, 'identifier': group_identifier}) + + for problem_identifier in os.listdir(group_path): + problem_path = os.path.join(group_path, problem_identifier) + if not os.path.exists(os.path.join(problem_path, 'common.py')): + continue + + problem_package = group_package + '.' 
+ problem_identifier + common_data = load_common_data(problem_package, problem_common_props) + common_data['identifier'] = problem_identifier + + if not common_data['visible']: + continue # problem is not visible, do not generate anything + del common_data['visible'] # we don't need this field in the GUI + + # save for later, to be used in problem_data below + hint_type = process_hint_type(common_data['hint_type']) + del common_data['hint_type'] # we don't need this field in the language index + + group_data['problems'].append(common_data) + + # load translations, and copy only problem names to the common data + problem_translations = load_translation_data(problem_package, problem_props) + name_translations = {} + for key, value in problem_translations.items(): + name_translations[key] = {'name': value.get('name')} + common_data['translations'] = name_translations + + problem_data = { + 'id': common_data['id'], + 'identifier': problem_identifier, + 'translations': problem_translations, + 'hint_type': hint_type + } + + problem_output_path = os.path.join(group_output_path, problem_identifier) + if not os.path.exists(problem_output_path): + os.mkdir(problem_output_path) + with open(os.path.join(problem_output_path, 'problem.json'), 'w') as f: + json.dump(problem_data, f, indent=2) + copy_web_resources(problem_package, [lang_identifier, group_identifier, problem_identifier]) + db_add('problem', problem_identifier, { + 'id': problem_data['id'], + 'language_id': lang_data['id'], + 'problem_group_id': group_data['id'], + 'name': problem_identifier, + 'identifier': problem_identifier + }) + + # add only non-empty problem groups + if group_data['problems']: + copy_web_resources(group_package, [lang_identifier, group_identifier]) + lang_data['groups'].append(group_data) + + dump_language_defs(lang_data, lang_output_path) + cur.close() + conn.commit() +except: + conn.rollback() + raise +finally: + db.return_connection(conn) + # dump the tree of resources with open(os.path.join(output_path, 'resources.json'), 'w') as f: json.dump(resource_tree, f, indent=2) -- cgit v1.2.1
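The directory convention the new loop relies on is easiest to see in isolation. The sketch below is not part of the patch: it walks the CODEQ_PROBLEMS tree the same way build_web_resources.py now does (a language is a directory containing common.py and a problems/ subdirectory; a group or problem is any subdirectory with its own common.py) and uses the same insert-if-missing idiom as db_add, keyed on the identifier column. For brevity it fills only name and identifier; the real script also passes the id values read from each common.py, plus language_id and problem_group_id for problems.

#!/usr/bin/python3
import os

import db  # connection pool module from this repository

problems_path = os.environ.get('CODEQ_PROBLEMS', '/var/local/codeq-problems')

def db_add(cur, table, identifier, data):
    # Insert a row only if no row with this identifier exists yet.
    cur.execute('select id from ' + table + ' where identifier = %s', (identifier,))
    if cur.fetchone() is None:
        cols, vals = zip(*sorted(data.items()))
        sql = 'insert into {} ({}) values ({})'.format(
            table, ','.join(cols), ','.join(['%s'] * len(cols)))
        cur.execute(sql, vals)

conn = db.get_connection()
try:
    cur = conn.cursor()
    for lang in os.listdir(problems_path):
        lang_path = os.path.join(problems_path, lang)
        # A language is a directory with common.py and a problems/ subdirectory.
        if not (os.path.isfile(os.path.join(lang_path, 'common.py')) and
                os.path.isdir(os.path.join(lang_path, 'problems'))):
            continue
        db_add(cur, 'language', lang, {'name': lang, 'identifier': lang})

        for group in os.listdir(os.path.join(lang_path, 'problems')):
            group_path = os.path.join(lang_path, 'problems', group)
            # A group is any subdirectory of problems/ with its own common.py.
            if not os.path.isfile(os.path.join(group_path, 'common.py')):
                continue
            db_add(cur, 'problem_group', group, {'name': group, 'identifier': group})

            for problem in os.listdir(group_path):
                problem_path = os.path.join(group_path, problem)
                # A problem is any subdirectory of a group with its own common.py.
                if not os.path.isfile(os.path.join(problem_path, 'common.py')):
                    continue
                # The real script additionally supplies id, language_id and problem_group_id here.
                db_add(cur, 'problem', problem, {'name': problem, 'identifier': problem})
    cur.close()
    conn.commit()
except:
    conn.rollback()
    raise
finally:
    db.return_connection(conn)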