
Commit 195414f

Add DeepGini Quantifier (#92)
This adds the *DeepGini* uncertainty quantifier. The implementation and this PR are part of the following paper:

> Michael Weiss and Paolo Tonella, **Simple Techniques Work Surprisingly Well for Neural Network Test Prioritization and Active Learning (Replication Paper)**, Proceedings of the 30th ACM SIGSOFT International Symposium on Software Testing and Analysis, 2021. (forthcoming)

DeepGini was proposed in the following paper:

> Feng, Yang, et al., **DeepGini: Prioritizing Massive Tests to Enhance the Robustness of Deep Neural Networks**, Proceedings of the 29th ACM SIGSOFT International Symposium on Software Testing and Analysis, 2020.
1 parent 09639e9 commit 195414f

File tree (4 files changed: +90 -1 lines changed)

.gitignore
tests_unit/quantifiers_tests/test_one_shot_classifiers.py
uncertainty_wizard/quantifiers/one_shot_classifiers.py
uncertainty_wizard/quantifiers/quantifier_registry.py


.gitignore

Lines changed: 1 addition & 0 deletions
@@ -16,3 +16,4 @@
 /uncertainty_wizard.egg-info/
 /examples/temp-ensemble.txt
 **/__pycache__/
+/lf_logs/*

tests_unit/quantifiers_tests/test_one_shot_classifiers.py

Lines changed: 42 additions & 0 deletions
@@ -8,9 +8,51 @@
     QuantifierRegistry,
     SoftmaxEntropy,
 )
+from uncertainty_wizard.quantifiers.one_shot_classifiers import DeepGini
 from uncertainty_wizard.quantifiers.quantifier import ProblemType
 
 
+class TestDeepGini(TestCase):
+    def test_string_representation(self):
+        self.assertTrue(isinstance(QuantifierRegistry.find("DeepGini"), DeepGini))
+        self.assertTrue(isinstance(QuantifierRegistry.find("deep_gini"), DeepGini))
+
+    def test_is_confidence(self):
+        self.assertFalse(DeepGini.is_confidence())
+        self.assertFalse(DeepGini().is_confidence())
+
+    def test_samples_type_declaration(self):
+        self.assertFalse(DeepGini.takes_samples())
+
+    def test_problem_type(self):
+        self.assertEqual(DeepGini.problem_type(), ProblemType.CLASSIFICATION)
+
+    def test_quantification(self):
+        input_batch = np.array(
+            [
+                [0.1, 0.2, 0.3, 0.4],
+                [0.5, 0.1, 0.1, 0.3],
+                [0.25, 0.25, 0.25, 0.25],
+                [1.0, 0, 0, 0],
+                [0, 1.0, 0, 0],
+            ]
+        )
+
+        expected = np.array(
+            [
+                0.7,  # https://bit.ly/301vmQ3
+                0.64,  # https://bit.ly/3qkHuGm
+                0.75,  # https://bit.ly/3wrPI0h
+                0,  # Trivial
+                0,  # Re-Ordering of previous
+            ]
+        )
+
+        pred, unc = DeepGini.calculate(input_batch)
+        assert np.all(pred == np.array([3, 0, 0, 0, 1]))
+        assert np.all(unc == expected)
+
+
 class TestPCS(TestCase):
 
     # =================
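For reference, the expected uncertainties in this test follow directly from the Gini formula 1 - sum(p_i^2); a quick standalone check of the first three rows (illustrative only, not part of the commit):

import numpy as np

batch = np.array([
    [0.1, 0.2, 0.3, 0.4],      # 1 - (0.01 + 0.04 + 0.09 + 0.16) = 0.70
    [0.5, 0.1, 0.1, 0.3],      # 1 - (0.25 + 0.01 + 0.01 + 0.09) = 0.64
    [0.25, 0.25, 0.25, 0.25],  # 1 - 4 * 0.0625                  = 0.75
])
print(1 - np.sum(batch ** 2, axis=1))  # approx. [0.70, 0.64, 0.75]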

uncertainty_wizard/quantifiers/one_shot_classifiers.py

Lines changed: 40 additions & 0 deletions
@@ -142,3 +142,43 @@ def calculate(cls, nn_outputs: np.ndarray):
         entropies = uwiz.quantifiers.predictive_entropy.entropy(nn_outputs, axis=1)
 
         return calculated_predictions, entropies
+
+
+class DeepGini(UncertaintyQuantifier):
+    """DeepGini - Uncertainty (1 minus sum of squared softmax outputs).
+
+
+    See Feng. et. al., "Deepgini: prioritizing massive tests to enhance
+    the robustness of deep neural networks" for more information. ISSTA 2020.
+
+    The implementation is part of our paper:
+    Michael Weiss and Paolo Tonella, Simple Techniques Work Surprisingly Well
+    for Neural Network Test Prioritization and Active Learning (Replication Paper),
+    ISSTA 2021. (forthcoming)"""
+
+    # docstr-coverage:inherited
+    @classmethod
+    def aliases(cls) -> List[str]:
+        return ["deep_gini", "DeepGini"]
+
+    # docstr-coverage:inherited
+    @classmethod
+    def takes_samples(cls) -> bool:
+        return False
+
+    # docstr-coverage:inherited
+    @classmethod
+    def is_confidence(cls) -> bool:
+        return False
+
+    # docstr-coverage:inherited
+    @classmethod
+    def calculate(cls, nn_outputs: np.ndarray):
+        predictions, _ = MaxSoftmax.calculate(nn_outputs)
+        gini = 1 - np.sum(nn_outputs * nn_outputs, axis=1)
+        return predictions, gini
+
+    # docstr-coverage:inherited
+    @classmethod
+    def problem_type(cls) -> ProblemType:
+        return ProblemType.CLASSIFICATION
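As a usage sketch (not part of the commit, and assuming the package is installed with this change), the quantifier can be applied directly to a batch of softmax outputs via the same import used in the tests:

import numpy as np
from uncertainty_wizard.quantifiers.one_shot_classifiers import DeepGini

# One row per input, one column per class; rows are softmax outputs.
softmax_scores = np.array([
    [0.1, 0.2, 0.3, 0.4],
    [0.9, 0.05, 0.03, 0.02],
])

# Returns the argmax predictions and one DeepGini score per input;
# higher scores mean the probability mass is spread more evenly,
# i.e. the prediction is less certain.
predictions, uncertainties = DeepGini.calculate(softmax_scores)
print(predictions)    # [3 0]
print(uncertainties)  # approx. [0.70, 0.1862]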

uncertainty_wizard/quantifiers/quantifier_registry.py

Lines changed: 7 additions & 1 deletion
@@ -1,6 +1,11 @@
 from .mean_softmax import MeanSoftmax
 from .mutual_information import MutualInformation
-from .one_shot_classifiers import MaxSoftmax, PredictionConfidenceScore, SoftmaxEntropy
+from .one_shot_classifiers import (
+    DeepGini,
+    MaxSoftmax,
+    PredictionConfidenceScore,
+    SoftmaxEntropy,
+)
 from .predictive_entropy import PredictiveEntropy
 from .quantifier import Quantifier
 from .regression_quantifiers import StandardDeviation
@@ -51,6 +56,7 @@ def find(cls, alias: str) -> Quantifier:
 QuantifierRegistry.register(MaxSoftmax())
 QuantifierRegistry.register(PredictionConfidenceScore())
 QuantifierRegistry.register(SoftmaxEntropy())
+QuantifierRegistry.register(DeepGini())
 QuantifierRegistry.register(VariationRatio())
 QuantifierRegistry.register(PredictiveEntropy())
 QuantifierRegistry.register(MutualInformation())
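With the registration above, both aliases declared in DeepGini.aliases() resolve to the new quantifier at runtime; a minimal lookup sketch (assuming QuantifierRegistry is importable from uncertainty_wizard.quantifiers, as in the test module):

from uncertainty_wizard.quantifiers import QuantifierRegistry

quantifier = QuantifierRegistry.find("deep_gini")  # or "DeepGini"
print(type(quantifier).__name__)   # DeepGini
print(quantifier.is_confidence())  # False: higher values mean more uncertainty
print(quantifier.takes_samples())  # False: operates on a single (one-shot) prediction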
