path: root/scripts/build_web_resources.py
#!/usr/bin/python3
# coding=utf-8

"""This tool processes all problem files and database data, and outputs JSON
files to describe problems, to be used as static web resources.
Before running the script define the following environment variables, if defaults are not okay:
CODEQ_WEB_OUTPUT - directory where to write the output, defaults to /var/www/html/data
CODEQ_PROBLEMS - directory where you have codeq-problems checked out, defaults to /var/local/codeq-problems
CODEQ_DB_HOST - hostname or IP of the database server, defaults to localhost
CODEQ_DB_PORT - TCP port number of the database server, defaults to 5432
CODEQ_DB_DATABASE - name of the database, defaults to codeq
CODEQ_DB_USER - database username, defaults to codeq
CODEQ_DB_PASS - database password, defaults to c0d3q
"""

import os
import traceback
import sys
import json
import shutil

# insert the parent directory into the module search path, so the problem scripts find their modules
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import server.problems
import db

problems_path = os.environ.get('CODEQ_PROBLEMS') or '/var/local/codeq-problems'  # where to find problems, the same as server.problems._path_prefix
output_path = os.environ.get('CODEQ_WEB_OUTPUT') or '/var/www/html/data'  # the base directory where to create subdirectories and output the files for web

translations = {'sl', 'en'}  # translations to seek (sl.py, en.py, ...)

# default values (properties) for various types of items, also the list of properties to copy from modules
language_props = {  # for translation files inside the language subdirectory
    'name': 'Name not set',
    'description': 'Description not set',
    'hint': {}
}
language_common_props = {  # for common.py inside the language subdirectory
    'hint_type': {}  # type definitions of common hints
}
group_props = {  # for translation files inside the problem group subdirectory
    'name': 'Name not set',
    'description': 'Description not set'
}
group_common_props = {  # for common.py inside the problem group subdirectory
    'number': 1  # display index of the problem group
}
problem_props = {  # for translation files inside the problem subdirectory
    'name': 'Name not set',
    'slug': 'Slug not set',
    'description': 'Description not set',
    'plan': [],
    'hint': {}
}
problem_common_props = {  # for common.py inside the problem subdirectory
    'number': 1,  # display index of problems inside their groups
    'visible': True,  # whether the problem is enabled (disabled problems are excluded from the application)
    'hint_type': {}  # type definitions of problem hints
}
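# For illustration only (the concrete values are hypothetical): a problem's
# common.py defines module-level values with the names above, e.g.
#   number = 3
#   visible = True
#   hint_type = {...}  # maps hint identifiers to objects exposing a .hint_type
#                      # attribute (see process_hint_type below)
# and its translation modules (sl.py, en.py, ...) define name, slug, description,
# plan and hint in the same way.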

languages = {}  # programming languages, info from database
groups = {}  # problem groups, info from database

if not os.path.exists(output_path):
    os.mkdir(output_path)

conn = db.get_connection()
cur = conn.cursor()

def load_translation_data(package, defaults):
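    """Load the per-language translation modules (<lang>.py) of the given package.

    Returns a dict keyed by language code; each value contains the properties
    listed in `defaults`, falling back to the default when the module does not
    define a property (or cannot be loaded at all). Illustrative shape only:
    {'en': {'name': 'Sorting', 'description': '...', 'hint': {}}, 'sl': {...}}
    """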
    result = {}
    path = os.sep.join(package.split('.'))
    for lang in translations:
        mod_path = os.path.join(problems_path, path, lang + '.py')
        if os.path.exists(mod_path) and os.path.isfile(mod_path):
            mod = None
            try:
                mod = server.problems.load_module(package + '.' + lang)
            except Exception:
                traceback.print_exc()
            lang_data = {}
            result[lang] = lang_data
            if mod is None:
                print('Could not load module {}'.format(package + '.' + lang))
                for prop, default in defaults.items():
                    lang_data[prop] = default
            else:
                for prop, default in defaults.items():
                    lang_data[prop] = getattr(mod, prop, default)
    return result

def load_common_data(package, defaults):
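    """Load common.py of the given package and return a dict with the properties
    listed in `defaults`; properties missing from the module (or all of them, if
    the module does not exist or cannot be loaded) fall back to the defaults."""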
    result = {}
    path = os.sep.join(package.split('.'))
    mod_path = os.path.join(problems_path, path, 'common.py')
    if os.path.exists(mod_path) and os.path.isfile(mod_path):
        mod = None
        try:
            mod = server.problems.load_module(package + '.common')
        except Exception:
            traceback.print_exc()
        if mod is None:
            print('Could not load module {}'.format(package + '.common'))
            for prop, default in defaults.items():
                result[prop] = default
        else:
            for prop, default in defaults.items():
                result[prop] = getattr(mod, prop, default)
    else:
        print('Module {} does not exist'.format(package + '.common'))
        for prop, default in defaults.items():
            result[prop] = default
    return result

def process_hint_type(hint_type):
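    """Map each hint identifier to its hint object's .hint_type value, so the
    result can be serialized to JSON (e.g., with a hypothetical identifier,
    {'my_hint': <hint object>} becomes {'my_hint': <that object's hint_type>})."""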
    result = {}
    for identifier, hint in hint_type.items():
        result[identifier] = hint.hint_type
    return result

resource_tree = {}  # a directory tree where branches are paths and leaves are filenames; it is rooted at the web data directory
def copy_web_resources(package, dst_dir_fragments):
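    """Copy the package's static files (everything except Python sources and
    hidden files) into the output subdirectory given by dst_dir_fragments, and
    record them in resource_tree, which ends up shaped roughly like this
    (names are illustrative only):
    {'prolog': {'logo.png': True, 'sorting': {'diagram.svg': True}}}"""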
    src_path = os.path.join(problems_path, os.sep.join(package.split('.')))
    dst_path = output_path
    node = resource_tree
    for fragment in dst_dir_fragments:
        dst_path = os.path.join(dst_path, fragment)
        subnode = node.get(fragment)
        if subnode is None:
            subnode = {}
            node[fragment] = subnode
        node = subnode
    for filename in os.listdir(src_path):
        if not filename.startswith('.') and not filename.endswith('.py'):
            full_filename = os.path.join(src_path, filename)
            if os.path.isfile(full_filename):
                shutil.copy(full_filename, dst_path)
                node[filename] = True

cur.execute('select id, name, identifier from language')
row = cur.fetchone()
while row:
    languages[row[0]] = {'id': row[0], 'name': row[1], 'identifier': row[2]}
    row = cur.fetchone()

cur.execute('select id, name, identifier from problem_group')
row = cur.fetchone()
while row:
    groups[row[0]] = {'id': row[0], 'name': row[1], 'identifier': row[2]}
    row = cur.fetchone()

cur.execute('select id, language_id, problem_group_id, identifier from problem where is_visible = true order by language_id, problem_group_id')
previous_language_id = None
previous_group_id = None
lang_output_path = None
lang_index = None
row = cur.fetchone()

def dump_language_defs():
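    """Write the language index accumulated in lang_index (its groups and their
    problems sorted by display number) to language.json in the current language's
    output directory."""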
    if lang_index:
        # sort groups and problems
        groups = lang_index['groups']
        for group in groups:
            group['problems'].sort(key=lambda p: p.get('number', 0))
        groups.sort(key=lambda p: p.get('number', 0))
        # write out the JSON file
        with open(os.path.join(lang_output_path, 'language.json'), 'w') as f:
            json.dump(lang_index, f, indent=2)

while row:
    # process language data; it all goes into language.json in the language directory
    try:
        language_id = row[1]
        if previous_language_id != language_id:
            language = languages[language_id]
            lang_identifier = language['identifier']
            language_path = os.path.join(problems_path, lang_identifier)
            if not (os.path.exists(language_path) and os.path.isdir(language_path)):
                print('ERROR: the directory for language {0} does not exist: {1}'.format(lang_identifier, language_path))
                continue
            dump_language_defs()  # dump the previous language index
            lang_output_path = os.path.join(output_path, lang_identifier)
            if not os.path.exists(lang_output_path):
                os.mkdir(lang_output_path)
            previous_language_id = language_id
            problem_groups_list = []
            lang_index = load_common_data(lang_identifier, language_common_props)
            lang_index['hint_type'] = process_hint_type(lang_index.get('hint_type', {}))  # process type definitions for common hints
            lang_index['id'] = language_id
            lang_index['identifier'] = lang_identifier
            lang_index['groups'] = problem_groups_list
            lang_index['translations'] = load_translation_data(lang_identifier, language_props)
            previous_group_id = None
            copy_web_resources(lang_identifier, [lang_identifier])

        # process problem group data; it also goes into language.json in the language directory
        group_id = row[2]
        if previous_group_id != group_id:
            group = groups[group_id]
            group_identifier = group['identifier']
            group_path = os.path.join(language_path, 'problems', group_identifier)
            if not (os.path.exists(group_path) and os.path.isdir(group_path)):
                print('ERROR: the directory for group {0}/{1} does not exist: {2}'.format(lang_identifier, group_identifier, group_path))
                continue
            group_output_path = os.path.join(lang_output_path, group_identifier)
            if not os.path.exists(group_output_path):
                os.mkdir(group_output_path)
            group_package = lang_identifier + '.problems.' + group_identifier
            previous_group_id = group_id
            problems_list = []
            group_data = load_common_data(group_package, group_common_props)
            group_data['id'] = group_id
            group_data['identifier'] = group_identifier
            group_data['problems'] = problems_list
            group_data['translations'] = load_translation_data(group_package, group_props)
            problem_groups_list.append(group_data)
            copy_web_resources(group_package, [lang_identifier, group_identifier])

        # process problem data: values from common.py go into the language directory's language.json, the rest goes into the problem subdirectory's problem.json
        problem_id = row[0]
        problem_identifier = row[3]
        problem_path = os.path.join(group_path, problem_identifier)
        if not (os.path.exists(problem_path) and os.path.isdir(problem_path)):
            print('ERROR: the directory for problem {0}/{1}/{2} does not exist: {3}'.format(lang_identifier, group_identifier, problem_identifier, problem_path))
            continue
        problem_package = group_package + '.' + problem_identifier
        # load common data, for the language directory
        common_data = load_common_data(problem_package, problem_common_props)
        if not common_data['visible']:
            continue  # problem is not visible, do not generate anything
        del common_data['visible']  # we don't need this field in the GUI
        hint_type = process_hint_type(common_data['hint_type'])  # save for later, to be used in problem_data below
        del common_data['hint_type']  # we don't need this field in the language index
        common_data['id'] = problem_id
        common_data['identifier'] = problem_identifier
        problems_list.append(common_data)
        # load translations, and copy only problem names to the common data
        problem_translations = load_translation_data(problem_package, problem_props)
        name_translations = {}
        for key, value in problem_translations.items():
            name_translations[key] = {'name': value.get('name')}
        common_data['translations'] = name_translations
        # dump translations, for the problem subdirectory's problem.json
        problem_data = {'id': problem_id, 'identifier': problem_identifier, 'translations': problem_translations, 'hint_type': hint_type}
        problem_output_path = os.path.join(group_output_path, problem_identifier)
        if not os.path.exists(problem_output_path):
            os.mkdir(problem_output_path)
        with open(os.path.join(problem_output_path, 'problem.json'), 'w') as f:
            json.dump(problem_data, f, indent=2)
        copy_web_resources(problem_package, [lang_identifier, group_identifier, problem_identifier])
    finally:
        row = cur.fetchone()

dump_language_defs()  # dump the last language index
# dump the tree of resources
with open(os.path.join(output_path, 'resources.json'), 'w') as f:
    json.dump(resource_tree, f, indent=2)
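
# The generated layout under CODEQ_WEB_OUTPUT is roughly the following
# (<language>, <group> and <problem> stand for the respective identifiers):
#   <output>/resources.json                             - tree of copied static files
#   <output>/<language>/language.json                   - language, group and problem index
#   <output>/<language>/<group>/<problem>/problem.json  - per-problem translations and hint types
# plus any static resources copied alongside these files.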