path: root/abml/evaluate.py
import pickle
import argparse
import Orange
from Orange.evaluation import TestOnTestData, CA, AUC, LogLoss
import abml.rules_prolog as rp

import orangecontrib.evcrules.logistic as logistic
import orangecontrib.abml.abrules as rules
import orangecontrib.abml.argumentation as arg

parser = argparse.ArgumentParser(description='Learn and test rules for prolog programs.')
parser.add_argument('Name', type=str, help='Predicate name.')
args = parser.parse_args()
name = args.Name
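# Typical invocation (an assumption about how the script is launched; run from the
# repository root so the relative data/ paths resolve, with <Name> being the
# predicate whose tables live in data/<Name>/):
#   python abml/evaluate.py <Name>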

# load data
data = Orange.data.Table('data/{}/programs-train'.format(name))

# create learner
rule_learner = rp.Rules4Prolog(name, 0.9)
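# (the 0.9 above is passed straight through to Rules4Prolog; its exact meaning is
# defined in abml/rules_prolog.py, imported here as rp, not in this script)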

# learn a classifier
classifier = rule_learner(data)

# save model
with open("data/{}/model.txt".format(name), "wt") as fmodel:
    for r in classifier.rule_list:
        print(r, r.curr_class_dist, r.quality)
        fmodel.write("{} dist={} quality={}\n".format(r, r.curr_class_dist, r.quality))

# accuracy of model
testdata = Orange.data.Table('data/{}/programs-test'.format(name))
predictions = classifier(testdata)
acc = 0
for i, p in enumerate(predictions):
    acc += p == testdata.Y[i]
acc /= len(testdata)
print("Accuracy on test data: ", acc)
predictions = classifier(data)
acc = 0
for i, p in enumerate(predictions):
    acc += p == data.Y[i]
acc /= len(data)
print("Accuracy on train data: ", acc)

# test model + other methods
bayes = Orange.classification.NaiveBayesLearner()
logreg = Orange.classification.LogisticRegressionLearner()  # renamed: `logistic` would shadow the evcrules.logistic import above
tree = Orange.classification.TreeLearner()
random_forest = Orange.classification.RandomForestLearner()
svm = Orange.classification.SVMLearner()
cn2 = Orange.classification.rules.CN2UnorderedLearner()
learners = [rule_learner, logreg, bayes, cn2, tree, random_forest, svm]
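# the order of `learners` must line up with the `names` list used below when
# printing scores (rule_learner is reported as 'logrules')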
res = TestOnTestData(data, testdata, learners)
ca = CA(res)
auc = AUC(res)
ll = LogLoss(res)
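# CA = classification accuracy, AUC = area under the ROC curve, LogLoss = logarithmic
# loss; each scorer returns one value per learner, in the same order as `learners`.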

names = ['logrules', 'logistic', 'naive-bayes', 'cn2', 'tree', 'random-forest', 'svm']
scores = ""
scores += "CA\tAUC\tLogLoss\tMethod\n"
for ni, n in enumerate(names):
    scores += "{}\t{}\t{}\t{}\n".format(ca[ni], auc[ni], ll[ni], n)
print(scores)
with open("data/{}/scores.txt".format(name), "wt") as fscores:
    fscores.write(scores)

all_rules = classifier.rule_list
all_rules.sort(key=lambda r: r.quality, reverse=True)  # note: sorts the classifier's rule_list in place (all_rules is the same list object)
with open("data/{}/rules.txt".format(name), "wt") as rfile:
    for r in all_rules:
        print(r, r.curr_class_dist, r.quality)
        rfile.write("{} {} {}\n".format(r, r.curr_class_dist, r.quality))
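
# The pickle import at the top is currently unused; a minimal sketch for persisting
# the learned classifier, assuming the classifier object is picklable (the
# "model.pickle" filename is just an example, not part of the original pipeline):
with open("data/{}/model.pickle".format(name), "wb") as fpickle:
    pickle.dump(classifier, fpickle)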