diff --git a/examples/run_classification.py b/examples/run_classification.py
index 7eec881a..7d389638 100644
--- a/examples/run_classification.py
+++ b/examples/run_classification.py
@@ -70,7 +70,7 @@ def explain_auto(args):
     # Save saliency maps for visual inspection
     if args.output is not None:
         output = Path(args.output) / "explain_auto"
-        explanation.save(output, Path(args.image_path).stem)
+        explanation.save(output, f"{Path(args.image_path).stem}_")
 
 
 def explain_white_box(args):
@@ -117,7 +117,7 @@ def explain_white_box(args):
     # Save saliency maps for visual inspection
    if args.output is not None:
        output = Path(args.output) / "explain_white_box"
-        explanation.save(output, Path(args.image_path).stem)
+        explanation.save(output, f"{Path(args.image_path).stem}_")
 
 
 def explain_black_box(args):
@@ -160,7 +160,7 @@ def explain_black_box(args):
     # Save saliency maps for visual inspection
     if args.output is not None:
         output = Path(args.output) / "explain_black_box"
-        explanation.save(output, Path(args.image_path).stem)
+        explanation.save(output, f"{Path(args.image_path).stem}_")
 
 
 def explain_white_box_multiple_images(args):
@@ -203,7 +203,7 @@ def explain_white_box_multiple_images(args):
     # Save saliency maps for visual inspection
     if args.output is not None:
         output = Path(args.output) / "explain_white_box_multiple_images"
-        explanation[0].save(output, Path(args.image_path).stem)
+        explanation[0].save(output, f"{Path(args.image_path).stem}_")
 
 
 def explain_white_box_vit(args):
@@ -241,7 +241,7 @@ def explain_white_box_vit(args):
     # Save saliency maps for visual inspection
     if args.output is not None:
         output = Path(args.output) / "explain_white_box_vit"
-        explanation.save(output, Path(args.image_path).stem)
+        explanation.save(output, f"{Path(args.image_path).stem}_")
 
 
 def insert_xai(args):
diff --git a/openvino_xai/explainer/explainer.py b/openvino_xai/explainer/explainer.py
index 31c08afd..a27fc74b 100644
--- a/openvino_xai/explainer/explainer.py
+++ b/openvino_xai/explainer/explainer.py
@@ -221,8 +221,9 @@ def explain(
         explanation = Explanation(
             saliency_map=saliency_map,
             targets=targets,
+            task=self.task,
             label_names=label_names,
-            metadata=self.method.metadata,
+            predictions=self.method.predictions,
         )
         return self._visualize(
             original_input_image,
diff --git a/openvino_xai/explainer/explanation.py b/openvino_xai/explainer/explanation.py
index 13f61ef5..28c9bc42 100644
--- a/openvino_xai/explainer/explanation.py
+++ b/openvino_xai/explainer/explanation.py
@@ -4,7 +4,7 @@
 import os
 from enum import Enum
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Dict, List
 
 import cv2
 import matplotlib.pyplot as plt
@@ -17,6 +17,7 @@
     explains_all,
     get_target_indices,
 )
+from openvino_xai.methods.base import Prediction
 
 
 class Explanation:
@@ -28,16 +29,21 @@ class Explanation:
     :param targets: List of custom labels to explain, optional. Can be list of integer indices (int),
         or list of names (str) from label_names.
     :type targets: np.ndarray | List[int | str] | int | str
+    :param task: Type of the task: CLASSIFICATION or DETECTION.
+    :type task: Task
     :param label_names: List of all label names.
     :type label_names: List[str] | None
+    :param predictions: Per-target model prediction (available only for black-box methods).
+    :type predictions: Dict[int, Prediction] | None
     """
 
     def __init__(
         self,
         saliency_map: np.ndarray | Dict[int | str, np.ndarray],
         targets: np.ndarray | List[int | str] | int | str,
+        task: Task,
         label_names: List[str] | None = None,
-        metadata: Dict[Task, Any] | None = None,
+        predictions: Dict[int, Prediction] | None = None,
     ):
         targets = convert_targets_to_numpy(targets)
 
@@ -57,10 +63,12 @@ def __init__(
             self.layout = Layout.MULTIPLE_MAPS_PER_IMAGE_GRAY
 
         if not explains_all(targets) and not self.layout == Layout.ONE_MAP_PER_IMAGE_GRAY:
-            self._saliency_map = self._select_target_saliency_maps(targets, label_names)
+            label_names_ = None if task == Task.DETECTION else label_names
+            self._saliency_map = self._select_target_saliency_maps(targets, label_names_)
 
+        self.task = task
         self.label_names = label_names
-        self.metadata = metadata
+        self.predictions = predictions
 
     @property
     def saliency_map(self) -> Dict[int | str, np.ndarray]:
@@ -180,7 +188,7 @@ def save(
             map_to_save = cv2.cvtColor(map_to_save, code=cv2.COLOR_RGB2BGR)
             if isinstance(target_idx, str):
                 target_name = "activation_map"
-            elif self.label_names and isinstance(target_idx, np.int64):
+            elif self.label_names and isinstance(target_idx, np.int64) and self.task != Task.DETECTION:
                 target_name = self.label_names[target_idx]
             else:
                 target_name = str(target_idx)
diff --git a/openvino_xai/explainer/visualizer.py b/openvino_xai/explainer/visualizer.py
index aedd8295..32c5b3d4 100644
--- a/openvino_xai/explainer/visualizer.py
+++ b/openvino_xai/explainer/visualizer.py
@@ -1,7 +1,7 @@
 # Copyright (C) 2023-2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-from typing import Any, Dict, List, Tuple
+from typing import Dict, List, Tuple
 
 import cv2
 import numpy as np
@@ -16,6 +16,7 @@
     Explanation,
     Layout,
 )
+from openvino_xai.methods.base import Prediction
 
 
 def resize(saliency_map: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray:
@@ -80,6 +81,7 @@ def __call__(
         colormap: bool = True,
         overlay: bool = False,
         overlay_weight: float = 0.5,
+        overlay_prediction: bool = True,
     ) -> Explanation:
         return self.visualize(
             explanation,
@@ -90,6 +92,7 @@ def __call__(
             colormap,
             overlay,
             overlay_weight,
+            overlay_prediction,
         )
 
     def visualize(
@@ -102,6 +105,7 @@ def visualize(
         colormap: bool = True,
         overlay: bool = False,
         overlay_weight: float = 0.5,
+        overlay_prediction: bool = True,
     ) -> Explanation:
         """
         Saliency map postprocess method.
@@ -126,6 +130,8 @@ def visualize(
         :type overlay: bool
         :parameter overlay_weight: Weight of the saliency map when overlaying the input data with the saliency map.
         :type overlay_weight: float
+        :parameter overlay_prediction: If True, plot model prediction over the overlay.
+        :type overlay_prediction: bool
         """
         if original_input_image is not None:
             original_input_image = format_to_bhwc(original_input_image)
@@ -147,7 +153,14 @@ def visualize(
                 saliency_map_np = self._apply_overlay(
                     explanation, saliency_map_np, original_input_image, output_size, overlay_weight
                 )
-                saliency_map_np = self._apply_metadata(explanation.metadata, saliency_map_np, indices_to_return)
+                if overlay_prediction and explanation.task == Task.CLASSIFICATION:
+                    self._put_classification_info(
+                        saliency_map_np, indices_to_return, explanation.label_names, explanation.predictions  # type:ignore
+                    )
+                if overlay_prediction and explanation.task == Task.DETECTION:
+                    self._put_detection_info(
+                        saliency_map_np, indices_to_return, explanation.label_names, explanation.predictions  # type:ignore
+                    )
             else:
                 if resize:
                     if original_input_image is None and output_size is None:
@@ -162,27 +175,61 @@ def visualize(
         return self._update_explanation_with_processed_sal_map(explanation, saliency_map_np, indices_to_return)
 
     @staticmethod
-    def _apply_metadata(metadata: Dict[Task, Any], saliency_map_np: np.ndarray, indices: List[int | str]):
-        # TODO (negvet): support when indices are strings
-        if metadata:
-            if Task.DETECTION in metadata:
-                for smap_i, target_index in zip(range(len(saliency_map_np)), indices):
-                    saliency_map = saliency_map_np[smap_i]
-                    box, score, label_index = metadata[Task.DETECTION][target_index]
-                    x1, y1, x2, y2 = box
-                    cv2.rectangle(saliency_map, (int(x1), int(y1)), (int(x2), int(y2)), color=(255, 0, 0), thickness=2)
-                    box_label = f"{label_index}|{score:.2f}"
-                    box_label_loc = int(x1), int(y1 - 5)
-                    cv2.putText(
-                        saliency_map,
-                        box_label,
-                        org=box_label_loc,
-                        fontFace=1,
-                        fontScale=1,
-                        color=(255, 0, 0),
-                        thickness=2,
-                    )
-        return saliency_map_np
+    def _put_classification_info(
+        saliency_map_np: np.ndarray,
+        indices: List[int],
+        label_names: List[str] | None,
+        predictions: Dict[int, Prediction] | None,
+    ) -> None:
+        corner_location = 3, 17
+        for smap, target_index in zip(range(len(saliency_map_np)), indices):
+            label = label_names[target_index] if label_names else str(target_index)
+            if predictions and target_index in predictions:
+                score = predictions[target_index].score
+                if score:
+                    label = f"{label}|{score:.2f}"
+
+            cv2.putText(
+                saliency_map_np[smap],
+                label,
+                org=corner_location,
+                fontFace=1,
+                fontScale=1.3,
+                color=(255, 0, 0),
+                thickness=2,
+            )
+
+    @staticmethod
+    def _put_detection_info(
+        saliency_map_np: np.ndarray,
+        indices: List[int],
+        label_names: List[str] | None,
+        predictions: Dict[int, Prediction] | None,
+    ) -> None:
+        if not predictions:
+            return
+
+        for smap, target_index in zip(range(len(saliency_map_np)), indices):
+            saliency_map = saliency_map_np[smap]
+            label_index = predictions[target_index].label
+            score = predictions[target_index].score
+            box = predictions[target_index].bounding_box
+
+            x1, y1, x2, y2 = np.array(box, dtype=np.int32)
+            cv2.rectangle(saliency_map, (x1, y1), (x2, y2), color=(255, 0, 0), thickness=2)
+
+            label = label_names[label_index] if label_names else label_index
+            label_score = f"{label}|{score:.2f}"
+            box_location = int(x1), int(y1 - 5)
+            cv2.putText(
+                saliency_map,
+                label_score,
+                org=box_location,
+                fontFace=1,
+                fontScale=1.3,
+                color=(255, 0, 0),
+                thickness=2,
+            )
 
     @staticmethod
     def _apply_scaling(explanation: Explanation, saliency_map_np: np.ndarray) -> np.ndarray:
diff --git a/openvino_xai/methods/base.py b/openvino_xai/methods/base.py
index 59d207db..59033fe9 100644
--- a/openvino_xai/methods/base.py
+++ b/openvino_xai/methods/base.py
@@ -1,14 +1,13 @@
 # Copyright (C) 2024 Intel Corporation
 # SPDX-License-Identifier: Apache-2.0
 
-import collections
 from abc import ABC, abstractmethod
-from typing import Any, Callable, Dict, Mapping
+from dataclasses import dataclass
+from typing import Callable, Dict, List, Mapping, Tuple
 
 import numpy as np
 import openvino as ov
 
-from openvino_xai.common.parameters import Task
 from openvino_xai.common.utils import IdentityPreprocessFN
 
 
@@ -25,7 +24,7 @@ def __init__(
         self._model_compiled = None
         self.preprocess_fn = preprocess_fn
         self._device_name = device_name
-        self.metadata: Dict[Task, Any] = collections.defaultdict(dict)
+        self.predictions: Dict[int, Prediction] = {}
 
     @property
     def model_compiled(self) -> ov.CompiledModel | None:
@@ -50,3 +49,10 @@ def generate_saliency_map(self, data: np.ndarray) -> Dict[int, np.ndarray] | np.
     def load_model(self) -> None:
         core = ov.Core()
         self._model_compiled = core.compile_model(model=self._model, device_name=self._device_name)
+
+
+@dataclass
+class Prediction:
+    label: int | None = None
+    score: float | None = None
+    bounding_box: List | Tuple | None = None
diff --git a/openvino_xai/methods/black_box/aise/base.py b/openvino_xai/methods/black_box/aise/base.py
index e1c98289..d384077d 100644
--- a/openvino_xai/methods/black_box/aise/base.py
+++ b/openvino_xai/methods/black_box/aise/base.py
@@ -41,8 +41,9 @@ def __init__(
         device_name: str = "CPU",
         prepare_model: bool = True,
     ):
-        super().__init__(model=model, preprocess_fn=preprocess_fn, device_name=device_name)
-        self.postprocess_fn = postprocess_fn
+        super().__init__(
+            model=model, postprocess_fn=postprocess_fn, preprocess_fn=preprocess_fn, device_name=device_name
+        )
 
         self.data_preprocessed = None
         self.target: int | None = None
diff --git a/openvino_xai/methods/black_box/aise/classification.py b/openvino_xai/methods/black_box/aise/classification.py
index a4f3e340..f9b38f2e 100644
--- a/openvino_xai/methods/black_box/aise/classification.py
+++ b/openvino_xai/methods/black_box/aise/classification.py
@@ -16,6 +16,7 @@
     scaling,
     sigmoid,
 )
+from openvino_xai.methods.base import Prediction
 from openvino_xai.methods.black_box.aise.base import AISEBase, GaussianPerturbationMask
 from openvino_xai.methods.black_box.base import Preset
 from openvino_xai.methods.black_box.utils import check_classification_output
@@ -91,11 +92,12 @@ def generate_saliency_map(  # type: ignore
         """
         self.data_preprocessed = self.preprocess_fn(data)
 
+        logits = self.get_logits(self.data_preprocessed)
         if target_indices is None:
-            num_classes = self.get_num_classes(self.data_preprocessed)
-            if num_classes > 10:
-                logger.info(f"num_classes = {num_classes}, which might take significant time to process.")
+            num_classes = logits.shape[1]
             target_indices = list(range(num_classes))
+        if len(target_indices) > 10:
+            logger.info(f"{len(target_indices)} targets to process, which might take significant time.")
 
         self.num_iterations_per_kernel, self.kernel_widths = self._preset_parameters(
             preset,
@@ -110,6 +112,7 @@ def generate_saliency_map(  # type: ignore
         self._mask_generator = GaussianPerturbationMask(self.input_size)
 
         saliency_maps = {}
+        self.predictions = {}
         for target in target_indices:
             self.kernel_params_hist = collections.defaultdict(list)
             self.pred_score_hist = collections.defaultdict(list)
@@ -119,6 +122,10 @@ def generate_saliency_map(  # type: ignore
             if scale_output:
                 saliency_map_per_target = scaling(saliency_map_per_target)
             saliency_maps[target] = saliency_map_per_target
+            self.predictions[target] = Prediction(
+                label=target,
+                score=logits[0][target],
+            )
         return saliency_maps
 
     @staticmethod
diff --git a/openvino_xai/methods/black_box/aise/detection.py b/openvino_xai/methods/black_box/aise/detection.py
index ae75f6e7..7c8599bf 100644
--- a/openvino_xai/methods/black_box/aise/detection.py
+++ b/openvino_xai/methods/black_box/aise/detection.py
@@ -2,20 +2,20 @@
 # SPDX-License-Identifier: Apache-2.0
 
 import collections
-from typing import Any, Callable, Dict, List, Tuple
+from typing import Callable, Dict, List, Tuple
 
 import numpy as np
 import openvino.runtime as ov
 from openvino.runtime.utils.data_helpers.wrappers import OVDict
 from scipy.optimize import Bounds
 
-from openvino_xai.common.parameters import Task
 from openvino_xai.common.utils import (
     IdentityPreprocessFN,
     infer_size_from_image,
     logger,
     scaling,
 )
+from openvino_xai.methods.base import Prediction
 from openvino_xai.methods.black_box.aise.base import AISEBase, GaussianPerturbationMask
 from openvino_xai.methods.black_box.base import Preset
 from openvino_xai.methods.black_box.utils import check_detection_output
@@ -56,6 +56,7 @@ def __init__(
             prepare_model=prepare_model,
         )
         self.deletion = False
+        self.predictions = {}
 
     def generate_saliency_map(  # type: ignore
         self,
@@ -120,7 +121,7 @@ def generate_saliency_map(  # type: ignore
         self._mask_generator = GaussianPerturbationMask(self.input_size)
 
         saliency_maps = {}
-        self.metadata: Dict[Task, Any] = collections.defaultdict(dict)
+        self.predictions = {}
         for target in target_indices:
             self.target_box = boxes[target]
             self.target_label = labels[target]
@@ -137,7 +138,7 @@ def generate_saliency_map(  # type: ignore
                 saliency_map_per_target = scaling(saliency_map_per_target)
             saliency_maps[target] = saliency_map_per_target
 
-            self._update_metadata(boxes, scores, labels, target, original_size)
+            self._update_predictions(boxes, scores, labels, target, original_size)
         return saliency_maps
 
     @staticmethod
@@ -205,7 +206,7 @@ def _iou(box1: np.ndarray | List[float], box2: np.ndarray | List[float]) -> floa
         area2 = np.prod(box2[2:] - box2[:2])
         return intersection / (area1 + area2 - intersection)
 
-    def _update_metadata(
+    def _update_predictions(
         self,
         boxes: np.ndarray | List,
         scores: np.ndarray | List[float],
@@ -218,4 +219,8 @@ def _update_metadata(
         height_scale = original_size[0] / self.input_size[0]
         x1, x2 = x1 * width_scale, x2 * width_scale
         y1, y2 = y1 * height_scale, y2 * height_scale
-        self.metadata[Task.DETECTION][target] = [x1, y1, x2, y2], scores[target], labels[target]
+        self.predictions[target] = Prediction(
+            label=labels[target],
+            score=scores[target],
+            bounding_box=(x1, y1, x2, y2),
+        )
diff --git a/openvino_xai/methods/black_box/base.py b/openvino_xai/methods/black_box/base.py
index 12302218..16fe12d3 100644
--- a/openvino_xai/methods/black_box/base.py
+++ b/openvino_xai/methods/black_box/base.py
@@ -2,9 +2,12 @@
 # SPDX-License-Identifier: Apache-2.0
 
 from enum import Enum
+from typing import Callable, Mapping
 
+import numpy as np
 import openvino.runtime as ov
 
+from openvino_xai.common.utils import IdentityPreprocessFN
 from openvino_xai.methods.base import MethodBase
 from openvino_xai.methods.black_box.utils import check_classification_output
 
@@ -12,18 +15,28 @@
 class BlackBoxXAIMethod(MethodBase):
     """Base class for methods that explain model in Black-Box mode."""
 
+    def __init__(
+        self,
+        model: ov.Model,
+        postprocess_fn: Callable[[Mapping], np.ndarray],
+        preprocess_fn: Callable[[np.ndarray], np.ndarray] = IdentityPreprocessFN(),
+        device_name: str = "CPU",
+    ):
+        super().__init__(model=model, preprocess_fn=preprocess_fn, device_name=device_name)
+        self.postprocess_fn = postprocess_fn
+
     def prepare_model(self, load_model: bool = True) -> ov.Model:
         """Load model prior to inference."""
         if load_model:
             self.load_model()
         return self._model
 
-    def get_num_classes(self, data_preprocessed):
-        """Estimates number of classes for the classification model. Expects batch dimention."""
+    def get_logits(self, data_preprocessed: np.ndarray) -> np.ndarray:
+        """Gets logits for the classification model. Expects batch dimension."""
         forward_output = self.model_forward(data_preprocessed, preprocess=False)
         logits = self.postprocess_fn(forward_output)
         check_classification_output(logits)
-        return logits.shape[1]
+        return logits
 
 
 class Preset(Enum):
diff --git a/openvino_xai/methods/black_box/rise.py b/openvino_xai/methods/black_box/rise.py
index dec17423..e9a31024 100644
--- a/openvino_xai/methods/black_box/rise.py
+++ b/openvino_xai/methods/black_box/rise.py
@@ -9,6 +9,7 @@
 from tqdm import tqdm
 
 from openvino_xai.common.utils import IdentityPreprocessFN, is_bhwc_layout, scaling
+from openvino_xai.methods.base import Prediction
 from openvino_xai.methods.black_box.base import BlackBoxXAIMethod, Preset
 from openvino_xai.methods.black_box.utils import check_classification_output
 
@@ -41,8 +42,9 @@ def __init__(
         device_name: str = "CPU",
         prepare_model: bool = True,
     ):
-        super().__init__(model=model, preprocess_fn=preprocess_fn, device_name=device_name)
-        self.postprocess_fn = postprocess_fn
+        super().__init__(
+            model=model, postprocess_fn=postprocess_fn, preprocess_fn=preprocess_fn, device_name=device_name
+        )
 
         if prepare_model:
             self.prepare_model()
@@ -132,9 +134,10 @@ def _run_synchronous_explanation(
     ) -> np.ndarray:
         input_size = data_preprocessed.shape[1:3] if is_bhwc_layout(data_preprocessed) else data_preprocessed.shape[2:4]
 
-        num_classes = self.get_num_classes(data_preprocessed)
+        logits = self.get_logits(data_preprocessed)
 
         if target_classes is None:
+            num_classes = logits.shape[1]
             num_targets = num_classes
         else:
             num_targets = len(target_classes)
@@ -159,6 +162,11 @@ def _run_synchronous_explanation(
 
         if target_classes is not None:
             saliency_maps = self._reformat_as_dict(saliency_maps, target_classes)
+            for target in target_classes:
+                self.predictions[target] = Prediction(
+                    label=target,
+                    score=logits[0][target],
+                )
         return saliency_maps
 
     @staticmethod
diff --git a/tests/unit/explanation/test_explanation.py b/tests/unit/explanation/test_explanation.py
index 1cf2a791..c72e233d 100644
--- a/tests/unit/explanation/test_explanation.py
+++ b/tests/unit/explanation/test_explanation.py
@@ -7,6 +7,7 @@
 import numpy as np
 import pytest
 
+from openvino_xai.common.parameters import Task
 from openvino_xai.explainer.explanation import Explanation
 from tests.unit.explanation.test_explanation_utils import VOC_NAMES
 
@@ -20,6 +21,7 @@ def test_targets(self):
         explanation_indices = Explanation(
             SALIENCY_MAPS,
             targets=explain_targets,
+            task=Task.CLASSIFICATION,
             label_names=VOC_NAMES,
         )
 
@@ -27,6 +29,7 @@ def test_targets(self):
         explanation_names = Explanation(
             SALIENCY_MAPS,
             targets=explain_targets,
+            task=Task.CLASSIFICATION,
             label_names=VOC_NAMES,
         )
 
@@ -80,6 +83,7 @@ def _get_explanation(self, saliency_maps=SALIENCY_MAPS, label_names=VOC_NAMES):
         explanation = Explanation(
             saliency_maps,
             targets=explain_targets,
+            task=Task.CLASSIFICATION,
             label_names=label_names,
         )
         return explanation
diff --git a/tests/unit/explanation/test_visualization.py b/tests/unit/explanation/test_visualization.py
index 0f3bf6d8..9a59fd0a 100644
--- a/tests/unit/explanation/test_visualization.py
+++ b/tests/unit/explanation/test_visualization.py
@@ -8,6 +8,7 @@
 from openvino_xai.common.utils import get_min_max, scaling
 from openvino_xai.explainer.explanation import Explanation
 from openvino_xai.explainer.visualizer import Visualizer, colormap, overlay, resize
+from openvino_xai.methods.base import Prediction
 
 SALIENCY_MAPS = [
     (np.random.rand(1, 5, 5) * 255).astype(np.uint8),
@@ -98,6 +99,7 @@ def test_overlay():
 class TestVisualizer:
     @pytest.mark.parametrize("saliency_maps", SALIENCY_MAPS)
     @pytest.mark.parametrize("explain_all_classes", EXPLAIN_ALL_CLASSES)
+    @pytest.mark.parametrize("task", [Task.CLASSIFICATION, Task.DETECTION])
     @pytest.mark.parametrize("scaling", [True, False])
     @pytest.mark.parametrize("resize", [True, False])
     @pytest.mark.parametrize("colormap", [True, False])
@@ -107,6 +109,7 @@ def test_visualizer(
         self,
         saliency_maps,
         explain_all_classes,
+        task,
         scaling,
         resize,
         colormap,
@@ -118,7 +121,7 @@ def test_visualizer(
         else:
             explain_targets = [0]
 
-        explanation = Explanation(saliency_maps, targets=explain_targets)
+        explanation = Explanation(saliency_maps, targets=explain_targets, task=Task.CLASSIFICATION)
         raw_sal_map_dims = len(explanation.shape)
         original_input_image = np.ones((20, 20, 3))
 
@@ -148,7 +151,7 @@ def test_visualizer(
                 assert map_.shape[:2] == original_input_image.shape[:2]
 
         if isinstance(saliency_maps, np.ndarray) and saliency_maps.ndim == 3 and not overlay:
-            explanation = Explanation(saliency_maps, targets=-1)
+            explanation = Explanation(saliency_maps, targets=-1, task=Task.CLASSIFICATION)
             visualizer = Visualizer()
             explanation_output_size = visualizer(
                 explanation=explanation,
@@ -164,13 +167,11 @@ def test_visualizer(
         assert np.all(maps_data["per_image_map"] == maps_size["per_image_map"])
 
         if isinstance(saliency_maps, dict):
-            metadata = {
-                Task.DETECTION: {
-                    0: ([5, 0, 7, 4], 0.5, 0),
-                    1: ([2, 5, 9, 7], 0.5, 0),
-                }
+            predictions = {
+                0: Prediction(bounding_box=[5, 0, 7, 4], score=0.5, label=0),
+                1: Prediction(bounding_box=[2, 5, 9, 7], score=0.5, label=0),
             }
-            explanation = Explanation(saliency_maps, targets=-1, metadata=metadata)
+            explanation = Explanation(saliency_maps, targets=-1, task=task, predictions=predictions)
             visualizer = Visualizer()
             explanation_output_size = visualizer(
                 explanation=explanation,
diff --git a/tests/unit/methods/black_box/test_black_box_method.py b/tests/unit/methods/black_box/test_black_box_method.py
index 8115609d..cd42a539 100644
--- a/tests/unit/methods/black_box/test_black_box_method.py
+++ b/tests/unit/methods/black_box/test_black_box_method.py
@@ -11,6 +11,7 @@
 
 from openvino_xai.common.utils import retrieve_otx_model
 from openvino_xai.explainer.utils import get_postprocess_fn, get_preprocess_fn
+from openvino_xai.methods.base import Prediction
 from openvino_xai.methods.black_box.aise.classification import AISEClassification
 from openvino_xai.methods.black_box.aise.detection import AISEDetection
 from openvino_xai.methods.black_box.base import Preset
@@ -83,6 +84,8 @@ def test_run(self, target_indices, fxt_data_root: Path):
         assert len(saliency_map) == len(target_indices)
         for target in target_indices:
             assert target in saliency_map
+            assert target in aise_method.predictions
+            assert isinstance(aise_method.predictions[target], Prediction)
 
         ref_target = 0
         assert saliency_map[ref_target].dtype == np.uint8
@@ -135,6 +138,8 @@ def test_run(self, target_indices, fxt_data_root: Path):
         assert len(saliency_map) == len(target_indices)
         for target in target_indices:
             assert target in saliency_map
+            assert target in aise_method.predictions
+            assert isinstance(aise_method.predictions[target], Prediction)
 
         ref_target = 0
         assert saliency_map[ref_target].dtype == np.uint8
@@ -201,6 +206,9 @@ def test_run(self, target_indices, fxt_data_root: Path):
             actual_sal_vals = saliency_map[0][0, :10].astype(np.int16)
             ref_sal_vals = np.array([246, 241, 236, 231, 226, 221, 216, 211, 205, 197], dtype=np.uint8)
             assert np.all(np.abs(actual_sal_vals - ref_sal_vals) <= 1)
+
+            assert target_indices[0] in rise_method.predictions
+            assert isinstance(rise_method.predictions[target_indices[0]], Prediction)
         else:
             isinstance(saliency_map, np.ndarray)
             assert saliency_map.dtype == np.uint8
diff --git a/tests/unit/methods/white_box/test_white_box_method.py b/tests/unit/methods/white_box/test_white_box_method.py
index 5ee15c44..8684811d 100644
--- a/tests/unit/methods/white_box/test_white_box_method.py
+++ b/tests/unit/methods/white_box/test_white_box_method.py
@@ -39,6 +39,7 @@ def test_initialization(self):
         assert xai_method.embed_scaling
         assert not xai_method.per_class
         assert xai_method._target_layer == self.target_layer
+        assert xai_method.predictions == {}
 
     def test_generate_xai_branch(self):
         """Test that ActivationMap creates a proper XAI branch node."""
@@ -96,6 +97,7 @@ def test_initialization(self):
         assert reciprocam_xai_method.embed_scaling
         assert reciprocam_xai_method.per_class
         assert reciprocam_xai_method._target_layer == self.target_layer
+        assert reciprocam_xai_method.predictions == {}
 
     def test_generate_xai_branch(self):
         """Test that ReciproCAM creates a proper XAI branch node."""
@@ -153,6 +155,7 @@ def test_initialization(self):
         assert reciprocam_xai_method.embed_scaling
         assert reciprocam_xai_method.per_class
         assert reciprocam_xai_method._target_layer == self.target_layer
+        assert reciprocam_xai_method.predictions == {}
 
     @pytest.mark.parametrize("use_gaussian", [True, False])
     def test_generate_xai_branch(self, use_gaussian):
@@ -233,6 +236,7 @@ def test_initialization(self):
         assert detection_xai_method._target_layer == self.target_layer
         assert detection_xai_method._num_anchors == self.num_anchors
         assert detection_xai_method._saliency_map_size == (23, 23)
+        assert detection_xai_method.predictions == {}
 
     def test_generate_xai_branch(self):
         """Test that DetClassProbabilityMap creates a proper XAI branch node."""
diff --git a/tests/unit/metrics/test_adcc.py b/tests/unit/metrics/test_adcc.py
index f4ac0309..7e6dca91 100644
--- a/tests/unit/metrics/test_adcc.py
+++ b/tests/unit/metrics/test_adcc.py
@@ -74,7 +74,10 @@ def test_adcc(self):
     def test_evaluate(self):
         input_images = [np.random.rand(224, 224, 3) for _ in range(5)]
         explanations = [
-            Explanation({0: np.random.rand(224, 224), 1: np.random.rand(224, 224)}, targets=[0, 1]) for _ in range(5)
+            Explanation(
+                {0: np.random.rand(224, 224), 1: np.random.rand(224, 224)}, targets=[0, 1], task=Task.CLASSIFICATION
+            )
+            for _ in range(5)
         ]
 
         adcc_score = self.adcc.evaluate(explanations, input_images)["adcc"]
diff --git a/tests/unit/metrics/test_auc.py b/tests/unit/metrics/test_auc.py
index 1346012a..4ec1f68a 100644
--- a/tests/unit/metrics/test_auc.py
+++ b/tests/unit/metrics/test_auc.py
@@ -57,7 +57,10 @@ def test_insertion_deletion_auc(self):
     def test_evaluate(self):
         input_images = [np.random.rand(224, 224, 3) for _ in range(5)]
         explanations = [
-            Explanation({0: np.random.rand(224, 224), 1: np.random.rand(224, 224)}, targets=[0, 1]) for _ in range(5)
+            Explanation(
+                {0: np.random.rand(224, 224), 1: np.random.rand(224, 224)}, targets=[0, 1], task=Task.CLASSIFICATION
+            )
+            for _ in range(5)
         ]
 
         insertion, deletion, delta = self.auc.evaluate(explanations, input_images, self.steps).values()
diff --git a/tests/unit/metrics/test_pointing_game.py b/tests/unit/metrics/test_pointing_game.py
index b6de5403..6592de60 100644
--- a/tests/unit/metrics/test_pointing_game.py
+++ b/tests/unit/metrics/test_pointing_game.py
@@ -3,6 +3,7 @@
 import numpy as np
 import pytest
 
+from openvino_xai.common.parameters import Task
 from openvino_xai.explainer.explanation import Explanation
 from openvino_xai.metrics.pointing_game import PointingGame
 
@@ -29,6 +30,7 @@ def test_pointing_game_evaluate(self, caplog):
         explanation = Explanation(
             label_names=["cat", "dog"],
             targets=[0, 1],
+            task=Task.CLASSIFICATION,
             saliency_map={0: [[0, 1], [2, 3]], 1: [[0, 0], [0, 1]]},
         )
         explanations = [explanation]
@@ -58,6 +60,7 @@ def test_pointing_game_evaluate(self, caplog):
         explanation = Explanation(
             label_names=None,
             targets=[0, 1],
+            task=Task.CLASSIFICATION,
             saliency_map={0: [[0, 1], [2, 3]], 1: [[0, 0], [0, 1]]},
         )
         explanations = [explanation]