path: root/abml/evaluate.py
blob: cb959edd6507d8812df76b4b186e48bd5b57d552
import os.path
import argparse
import Orange
from Orange.evaluation import TestOnTestData, CA, AUC, LogLoss
import abml.rules_prolog as rp

import orangecontrib.evcrules.logistic as logistic
import orangecontrib.abml.abrules as rules
import orangecontrib.abml.argumentation as arg

parser = argparse.ArgumentParser(description='Learn and evaluate rules for Prolog programs.')
parser.add_argument('path', help='path to data directory')
args = parser.parse_args()
path = args.path

# load data
data = Orange.data.Table(os.path.join(path, 'programs-train'))

# create learner
rule_learner = rp.Rules4Prolog(path, 0.9)

# learn a classifier
classifier = rule_learner(data)

# save model
fmodel = open(os.path.join(path, 'model.txt'), 'wt')
for r in classifier.rule_list:
    print(r, r.curr_class_dist, r.quality)
    fmodel.write("{} dist={} quality={}\n".format(r, r.curr_class_dist, r.quality))
fmodel.close()

# accuracy of model
testdata = Orange.data.Table(os.path.join(path, 'programs-test'))
predictions = classifier(testdata)
acc = (predictions == testdata.Y).mean()
print("Accuracy on test data:", acc)
predictions = classifier(data)
acc = (predictions == data.Y).mean()
print("Accuracy on train data:", acc)

# compare the rule model with other standard learners on the same test data
bayes = Orange.classification.NaiveBayesLearner()
logreg = Orange.classification.LogisticRegressionLearner()
tree = Orange.classification.TreeLearner()
random_forest = Orange.classification.RandomForestLearner()
svm = Orange.classification.SVMLearner()
cn2 = Orange.classification.rules.CN2UnorderedLearner()
learners = [rule_learner, logreg, bayes, cn2, tree, random_forest, svm]
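# TestOnTestData trains every learner on the training table and evaluates it on the test table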
res = TestOnTestData(data, testdata, learners)
ca = CA(res)
auc = AUC(res)
ll = LogLoss(res)

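# tabulate the scores; rows follow the order of the learners list above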
names = ['logrules', 'logistic', 'naive-bayes', 'cn2', 'tree', 'random-forest', 'svm']
scores = ""
scores += "CA\tAUC\tLogLoss\tMethod\n"
for ni, n in enumerate(names):
    scores += "{}\t{}\t{}\t{}\n".format(ca[ni], auc[ni], ll[ni], n)
print(scores)
fscores = open(os.path.join(path, 'scores.txt'), 'wt')
fscores.write(scores)
fscores.close()

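# write all induced rules, sorted by decreasing quality, to rules.txt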
all_rules = classifier.rule_list
all_rules.sort(key=lambda r: r.quality, reverse=True)
rfile = open(os.path.join(path, 'rules.txt'), 'wt')
for r in all_rules:
    print(r, r.curr_class_dist, r.quality)
    rfile.write("{} {} {}\n".format(r, r.curr_class_dist, r.quality))
rfile.close()
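
# Example invocation (assuming the data directory contains the
# 'programs-train' and 'programs-test' tables loaded above):
#   python abml/evaluate.py path/to/data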