Remove adcc and auc metrics
GalyaZalesskaya committed Aug 19, 2024
1 parent dbc8686 commit fc55e93
Showing 6 changed files with 20 additions and 396 deletions.
104 changes: 0 additions & 104 deletions openvino_xai/metrics/adcc.py

This file was deleted.

110 changes: 0 additions & 110 deletions openvino_xai/metrics/insertion_deletion_auc.py

This file was deleted.

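For context on what was dropped: insertion/deletion AUC perturbs the input by progressively revealing (insertion) or erasing (deletion) the most salient pixels and integrates the model's confidence over the perturbation steps. The sketch below is a generic illustration of that idea, not the deleted insertion_deletion_auc.py code; model_score_fn and the step count are placeholders:

import numpy as np

def insertion_auc(model_score_fn, image, saliency_map, steps=30):
    # Generic insertion-AUC sketch, not the removed openvino_xai implementation.
    h, w = saliency_map.shape
    order = np.argsort(saliency_map.ravel())[::-1]   # pixel indices, most salient first
    per_step = max(1, order.size // steps)

    canvas = np.zeros_like(image)                    # start from an empty (black) image
    scores = []
    for i in range(steps + 1):
        idx = order[: i * per_step]
        ys, xs = np.unravel_index(idx, (h, w))
        canvas[ys, xs] = image[ys, xs]               # reveal the next chunk of salient pixels
        scores.append(model_score_fn(canvas))        # target-class confidence (placeholder fn)
    return float(np.mean(scores))                    # ~ area under the confidence curve on [0, 1]

Deletion AUC is the mirror image: start from the original image, erase the most salient pixels first, and a steep confidence drop (small area) indicates a faithful saliency map.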
28 changes: 14 additions & 14 deletions openvino_xai/metrics/pointing_game.py
@@ -7,15 +7,15 @@ class PointingGame
@staticmethod
def pointing_game(saliency_map: np.ndarray, gt_bbox: Tuple[int, int, int, int]) -> bool:
"""
Implements the Pointing Game metric using bounding boxes.
Implements the Pointing Game metric using bounding boxes. Returns a boolean indicating
whether any of the most salient points falls within the ground truth bounding box.
Parameters:
- saliency_map: A 2D numpy array representing the saliency map for the image.
- gt_bbox: A tuple (x, y, w, h) representing the bounding box of the ground truth object.
Returns:
- hit: A boolean indicating whether any of the most salient points falls within the ground truth bounding box.
:param saliency_map: A 2D numpy array representing the saliency map for the image.
:type saliency_map: np.ndarray
:param gt_bbox: A tuple (x, y, w, h) representing the bounding box of the ground truth object.
:type gt_bbox: Tuple[int, int, int, int]
"""
# TODO: Support the case of multiple bounding boxes for one image
x, y, w, h = gt_bbox

# Find the most salient points in the saliency map
@@ -31,17 +31,17 @@ def evaluate(self, saliency_maps: List[np.ndarray], gt_bboxes: List[Tuple[int, i
"""
Evaluates the Pointing Game metric over a set of images.
Parameters:
- saliency_maps: A list of 2D numpy arrays representing the saliency maps.
- ground_truth_bbs: A list of ground truth bounding boxes.
Returns:
- score: The Pointing Game accuracy score over the dataset.
:param saliency_maps: A list of 2D numpy arrays representing the saliency maps.
:type saliency_maps: List[np.ndarray]
:param gt_bboxes: A list of ground truth bounding boxes, one per image.
:type gt_bboxes: List[Tuple[int, int, int, int]]
"""
assert len(saliency_maps) == len(
gt_bboxes
), "Number of saliency maps and ground truth bounding boxes must match."

hits = sum([self.pointing_game(s_map, gt_map) for s_map, gt_map in zip(saliency_maps, gt_bboxes)])
hits = sum(
[self.pointing_game(s_map, image_gt_bboxes) for s_map, image_gt_bboxes in zip(saliency_maps, gt_bboxes)]
)
score = hits / len(saliency_maps)
return score
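With the other metrics removed, PointingGame is the only module left in openvino_xai/metrics. A minimal usage sketch based on the signatures visible in this diff; the saliency map and box values are made up, and it assumes PointingGame() takes no constructor arguments:

import numpy as np
from openvino_xai.metrics.pointing_game import PointingGame

# Toy saliency map whose peak lies inside the ground-truth box.
saliency_map = np.zeros((224, 224), dtype=np.float32)
saliency_map[60, 50] = 1.0                 # peak at row 60, column 50 -> point (x=50, y=60)
gt_bbox = (40, 40, 30, 40)                 # (x, y, w, h): x in [40, 70], y in [40, 80]

hit = PointingGame.pointing_game(saliency_map, gt_bbox)      # peak falls inside the box
score = PointingGame().evaluate([saliency_map], [gt_bbox])   # fraction of hits over the dataset
print(hit, score)   # True and 1.0, if the most-salient-point check works as the docstring describes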
46 changes: 6 additions & 40 deletions tests/regression/test_regression.py
@@ -2,7 +2,6 @@
# SPDX-License-Identifier: Apache-2.0

import json
from typing import Callable, List, Mapping

import cv2
import openvino as ov
@@ -11,21 +10,13 @@
from openvino_xai import Task
from openvino_xai.common.utils import retrieve_otx_model
from openvino_xai.explainer.explainer import Explainer, ExplainMode
from openvino_xai.explainer.utils import get_postprocess_fn, get_preprocess_fn, sigmoid
from openvino_xai.methods.black_box.base import Preset
from openvino_xai.metrics.adcc import ADCC
from openvino_xai.metrics.insertion_deletion_auc import InsertionDeletionAUC
from openvino_xai.explainer.utils import get_preprocess_fn
from openvino_xai.metrics.pointing_game import PointingGame
from tests.unit.explanation.test_explanation_utils import VOC_NAMES

MODEL_NAME = "mlc_mobilenetv3_large_voc"


def postprocess_fn(x: Mapping):
x = sigmoid(x)
return x[0]


def load_gt_bboxes(class_name="person"):
with open("tests/assets/cheetah_person_coco.json", "r") as f:
coco_anns = json.load(f)
@@ -39,11 +30,6 @@ def load_gt_bboxes(class_name="person"):
return category_gt_bboxes


def postprocess_fn(x: Mapping):
x = sigmoid(x)
return x[0]


class TestDummyRegression:
image = cv2.imread("tests/assets/cheetah_person.jpg")

@@ -59,15 +45,10 @@ class TestDummyRegression:

@pytest.fixture(autouse=True)
def setup(self, fxt_data_root):
self.data_dir = fxt_data_root
retrieve_otx_model(self.data_dir, MODEL_NAME)
model_path = self.data_dir / "otx_models" / (MODEL_NAME + ".xml")
core = ov.Core()
model = core.read_model(model_path)
compiled_model = core.compile_model(model=model, device_name="AUTO")

self.auc = InsertionDeletionAUC(compiled_model, self.preprocess_fn, postprocess_fn)
self.adcc = ADCC(model, compiled_model, self.preprocess_fn, postprocess_fn)
data_dir = fxt_data_root
retrieve_otx_model(data_dir, MODEL_NAME)
model_path = data_dir / "otx_models" / (MODEL_NAME + ".xml")
model = ov.Core().read_model(model_path)

self.explainer = Explainer(
model=model,
@@ -91,16 +72,6 @@ def test_explainer_image(self):
score = self.pointing_game.evaluate(saliency_maps, self.gt_bboxes)
assert score > 0.5

insertion_auc_score = self.auc.insertion_auc_image(self.image, saliency_maps[0], self.steps)
assert insertion_auc_score >= 0.9

deletion_auc_score = self.auc.deletion_auc_image(self.image, saliency_maps[0], self.steps)
assert deletion_auc_score >= 0.2

adcc_score = self.adcc.adcc(self.image, saliency_maps[0])
# Why is the metric worse for a real image and detector than for a random image?
assert adcc_score >= 0.1

def test_explainer_images(self):
# TODO support multiple classes
images = [self.image, self.image]
@@ -115,10 +86,5 @@ def test_explainer_images(self):
saliency_map = list(explanation.saliency_map.values())[0]
saliency_maps.append(saliency_map)

score = self.pointing_game.evaluate(saliency_maps, [self.gt_bboxes[0], self.gt_bboxes[0]])
score = self.pointing_game.evaluate(saliency_maps, self.gt_bboxes * 2)
assert score > 0.5

insertion, deletion, delta = self.auc.evaluate(images, saliency_maps, self.steps)
assert insertion >= 0.9
assert deletion >= 0.2
assert delta >= 0.7
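After this commit the regression flow reduces to: load the model, build an Explainer, produce a saliency map, and score it with the Pointing Game. The sketch below mirrors the setup visible in the diff, but the explainer call signature, the task and explain-mode values, the preprocessing stand-in, the target index, and the bounding box are assumptions rather than code from this repository:

import cv2
import numpy as np
import openvino as ov

from openvino_xai import Task
from openvino_xai.common.utils import retrieve_otx_model
from openvino_xai.explainer.explainer import Explainer, ExplainMode
from openvino_xai.metrics.pointing_game import PointingGame

data_dir = "data"
retrieve_otx_model(data_dir, "mlc_mobilenetv3_large_voc")
model = ov.Core().read_model(f"{data_dir}/otx_models/mlc_mobilenetv3_large_voc.xml")


def preprocess_fn(x: np.ndarray) -> np.ndarray:
    # Stand-in for the get_preprocess_fn(...) used by the test; its exact kwargs are not shown in this diff.
    x = cv2.resize(x, (224, 224))
    return np.expand_dims(x, 0)


explainer = Explainer(
    model=model,
    task=Task.CLASSIFICATION,        # assumed task value
    preprocess_fn=preprocess_fn,
    explain_mode=ExplainMode.WHITEBOX,  # assumed explain mode
)

image = cv2.imread("tests/assets/cheetah_person.jpg")
explanation = explainer(image, targets=[14])             # 14 = "person" in VOC; call signature assumed
saliency_maps = list(explanation.saliency_map.values())  # per-target maps, as in the test

gt_bboxes = [(17, 160, 279, 350)]                         # hypothetical (x, y, w, h) for the person
score = PointingGame().evaluate(saliency_maps, gt_bboxes)
assert score > 0.5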