"""CSC148 Assignment 1
=== CSC148 Winter 2020 ===
Department of Computer Science,
University of Toronto
This code is provided solely for the personal and private use of
students taking the CSC148 course at the University of Toronto.
Copying for purposes other than this use is expressly prohibited.
All forms of distribution of this code, whether as given or with
any changes, are expressly prohibited.
Authors: Misha Schwartz, Mario Badr, Christine Murad, Diane Horton,
Sophia Huynh and Jaisie Sin
All of the files in this directory and all subdirectories are:
Copyright (c) 2020 Misha Schwartz, Mario Badr, Christine Murad, Diane Horton,
Sophia Huynh and Jaisie Sin
=== Module Description ===
This file contains classes that describe different types of criteria used to
evaluate a group of answers to a survey question.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, List

if TYPE_CHECKING:
    from survey import Question, Answer


class InvalidAnswerError(Exception):
    """
    Error that should be raised when an answer is invalid for a given
    question.
    """


class Criterion:
    """
    An abstract class representing a criterion used to evaluate the quality
    of a group based on the group members' answers for a given question.
    """

    def score_answers(self, question: Question,
                      answers: List[Answer]) -> float:
        """
        Return a score between 0.0 and 1.0 indicating the quality of the
        group of <answers> to the question <question>.

        Raise InvalidAnswerError if any answer in <answers> is not a valid
        answer to <question>.

        Each implementation of this abstract class will measure quality
        differently.
        """
        raise NotImplementedError


class HomogeneousCriterion(Criterion):
    """
    A criterion used to evaluate the quality of a group based on the group
    members' answers for a given question.

    This criterion gives a higher score to answers that are more similar.
    """

    def score_answers(self, question: Question,
                      answers: List[Answer]) -> float:
        """
        Return a score between 0.0 and 1.0 indicating how similar the
        answers in <answers> are.

        This score is calculated by finding the similarity of every
        combination of two answers in <answers> and taking the average of
        all of these similarity scores.

        If there is only one answer in <answers> and it is valid, return 1.0
        since a single answer is always identical to itself.

        Raise InvalidAnswerError if any answer in <answers> is not a valid
        answer to <question>.

        === Precondition ===
        len(answers) > 0
        """
        # Validate every answer before computing any similarity, so an
        # invalid answer is reported even when it appears late in the list
        # or when there is only one answer.
        for answer in answers:
            if not question.validate_answer(answer):
                raise InvalidAnswerError
        if len(answers) == 1:
            return 1.0
        # Average the similarity over every unordered pair of answers.
        total = 0.0
        num_pairs = 0
        for i in range(len(answers)):
            for j in range(i + 1, len(answers)):
                total += question.get_similarity(answers[i], answers[j])
                num_pairs += 1
        return total / num_pairs
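
# Worked example (hypothetical similarity values, for illustration only):
# with three answers whose pairwise similarities are 1.0, 0.5 and 0.0, there
# are 3 * 2 / 2 = 3 unordered pairs, so the homogeneous score is
# (1.0 + 0.5 + 0.0) / 3 = 0.5.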


class HeterogeneousCriterion(HomogeneousCriterion):
    """
    A criterion used to evaluate the quality of a group based on the group
    members' answers for a given question.

    This criterion gives a higher score to answers that are more different.
    """

    def score_answers(self, question: Question,
                      answers: List[Answer]) -> float:
        """
        Return a score between 0.0 and 1.0 indicating how different the
        answers in <answers> are.

        This score is calculated by finding the similarity of every
        combination of two answers in <answers>, finding the average of all
        of these similarity scores, and then subtracting this average from
        1.0.

        If there is only one answer in <answers> and it is valid, return 0.0
        since a single answer cannot differ from itself.

        Raise InvalidAnswerError if any answer in <answers> is not a valid
        answer to <question>.

        === Precondition ===
        len(answers) > 0
        """
        if len(answers) == 1:
            # Still validate the lone answer before returning.
            if not question.validate_answer(answers[0]):
                raise InvalidAnswerError
            return 0.0
        return 1.0 - super().score_answers(question, answers)
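
# Worked example (same hypothetical similarities as above): the homogeneous
# average is 0.5, so the heterogeneous score is 1.0 - 0.5 = 0.5.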


class LonelyMemberCriterion(Criterion):
    """
    A criterion used to measure the quality of a group of students according
    to the group members' answers to a question. This criterion assumes that
    a group is of high quality if no member of the group gives a unique
    answer to a question.
    """

    def score_answers(self, question: Question,
                      answers: List[Answer]) -> float:
        """
        Return a score between 0.0 and 1.0 indicating the quality of the
        group of <answers> to the question <question>.

        The score returned will be 0.0 iff there are any unique answers in
        <answers> and will be 1.0 otherwise.

        An answer is not unique if there is at least one other answer in
        <answers> with identical content.

        Raise InvalidAnswerError if any answer in <answers> is not a valid
        answer to <question>.

        === Precondition ===
        len(answers) > 0
        """
        # Validate all answers first, so an invalid answer is always
        # reported even if a unique answer appears earlier in the list.
        for answer in answers:
            if not question.validate_answer(answer):
                raise InvalidAnswerError
        for i, answer in enumerate(answers):
            # An answer is unique if no *other* answer shares its content.
            # Example: for contents ['a', 'a', 'b'], 'b' is unique (score
            # 0.0); for ['a', 'a', 'b', 'b'] no answer is unique (score 1.0).
            if all(other.content != answer.content
                   for j, other in enumerate(answers) if j != i):
                return 0.0
        return 1.0
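

# The function below is an illustrative sketch, not part of the assignment
# starter code. It uses hypothetical duck-typed stand-ins for the Question
# and Answer classes from survey.py; the stubs only provide the attributes
# the criteria above actually use, and the real classes may differ.
def _demo() -> None:
    """Score a small group of answers with each criterion."""
    class _StubAnswer:
        def __init__(self, content: str) -> None:
            self.content = content

    class _StubQuestion:
        def validate_answer(self, answer: _StubAnswer) -> bool:
            return isinstance(answer.content, str)

        def get_similarity(self, a: _StubAnswer, b: _StubAnswer) -> float:
            return 1.0 if a.content == b.content else 0.0

    question = _StubQuestion()
    answers = [_StubAnswer('a'), _StubAnswer('a'), _StubAnswer('b')]
    print(HomogeneousCriterion().score_answers(question, answers))    # 1/3
    print(HeterogeneousCriterion().score_answers(question, answers))  # 2/3
    print(LonelyMemberCriterion().score_answers(question, answers))   # 0.0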


if __name__ == '__main__':
    import python_ta
    python_ta.check_all(config={'extra-imports': ['typing',
                                                  'survey']})