\documentclass{llncs}

\usepackage[utf8]{inputenc}

% reclaim some plain-text sanity
\usepackage{newunicodechar}
\newunicodechar{∧}{\ensuremath{\land}}
\newunicodechar{⇒}{\ensuremath{\Rightarrow}}
\newunicodechar{⋯}{\ensuremath{\cdots}}

\usepackage{fancyvrb}
\fvset{commandchars=\\\{\},baselinestretch=0.98,samepage=true,xleftmargin=2.5mm}
% fancyvrb workaround: in sans-serif verbatim, make the backtick active and expand
% it to a braced literal backtick, so consecutive backticks are not ligated into quotes.
\makeatletter
\begingroup
\catcode`\`=\active
\gdef\FV@fontfamily@sf{%
  \def\FV@FontScanPrep{\FV@MakeActive\`}%
  \def\FV@FontFamily{\sffamily\edef`{{\string`}}}}
\endgroup
\makeatother
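% Illustrative sketch (assumed usage; the actual listings presumably live in the
% \input'd sections): with the redefinition above, a sans-serif Verbatim block such as
%   \begin{Verbatim}[fontfamily=sf]
%   sister(X, Y) :-
%       parent(P, X), parent(P, Y), female(X).
%   \end{Verbatim}
% should print any backticks literally instead of turning them into quotation marks.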

\usepackage{forest}
\usetikzlibrary{arrows.meta}

\usepackage{hyperref}

\newcommand\code[1]{\texttt{#1}}
\newcommand\red[1]{{\begingroup\color[rgb]{0.8,0.15,0.15}#1\endgroup}}
\newcommand\green[1]{{\begingroup\color[rgb]{0.2,0.7,0.2}#1\endgroup}}
\newcommand\hl[1]{\textbf{#1}}
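% Assumed usage of the helpers above (illustrative only): \code{...} for inline
% program text, e.g. \code{sister(X, Y)}; \red{...} and \green{...} to colour
% incorrect and correct pattern fragments in listings; \hl{...} for emphasis.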

\begin{document}

\title{Automatic extraction of AST patterns \\ for debugging student programs}
\author{Timotej Lazar, Martin Možina, Ivan Bratko}
\institute{University of Ljubljana, Faculty of Computer and Information Science, Slovenia}
\maketitle

\begin{abstract}
% motivation
When implementing a programming tutor, it is often difficult to manually anticipate all the errors students will make. An alternative is to automatically learn a bug library of erroneous patterns from students’ programs.
% learning
We propose using abstract-syntax-tree (AST) patterns as features for learning rules to distinguish between correct and incorrect programs. These rules can be used for debugging student programs: rules for incorrect programs (buggy rules) contain patterns indicating mistakes, whereas each rule for correct programs covers a subset of submissions sharing the same solution strategy.
% generating hints
To generate hints, we first check all buggy rules and point out incorrect patterns. If no buggy rule matches, rules for correct programs are used to recognize the student’s intent and suggest patterns that still need to be implemented.
% evaluation
We evaluated our approach on past student programming data for a number of Prolog problems. For 31 out of 44 problems, the induced rules correctly classified over 85\% of programs based only on their structural features. For approximately 73\% of incorrect submissions, we were able to generate hints that were implemented by the student in some subsequent submission.
\\\\
\textbf{Keywords:} Programming tutors · Error diagnosis · Hint generation · Abstract syntax tree · Syntactic features
\end{abstract}

\input{introduction}
\input{background}
\input{patterns}
\input{method}
\input{evaluation}
\input{conclusion}

\bibliographystyle{splncs}
\bibliography{aied2017}

\end{document}