XAI classification notebook update #22 (Closed)

Changes from all commits:
17 changes: 16 additions & 1 deletion CHANGELOG.md
@@ -1,5 +1,20 @@
 # Change Log
 
+## [1.1.0 Unreleased]
+
+### Summary
+
+*
+
+### What's Changed
+
+*
+
+### New Contributors
+
+*
+
+
 ## [1.0.0 Unreleased]
 
 ### Summary
@@ -11,7 +26,6 @@
 
 ### What's Changed
 
-* Refactor names/folders/objects for better verbosity by @GalyaZalesskaya in https://github.com/openvinotoolkit/openvino_xai/pull/5
 * Support classification task by @negvet in https://github.com/intel-sandbox/openvino_xai/commit/dd5fd9b73fe8c12e2d741792043372bcd900a850
 * Support detection task by @negvet in https://github.com/intel-sandbox/openvino_xai/commit/84f285f2f40a8b1fc50a8cd49798aae37afd58dc
 * Support Model API as inference engine by @negvet in https://github.com/intel-sandbox/openvino_xai/commit/5f575f122dedc0461975bd58f81e730a901a69a6
@@ -69,6 +83,7 @@
 * Add unit test coverage setting by @goodsong81 in https://github.com/intel-sandbox/openvino_xai/pull/63
 * Add LICENSE and SECURITY.md by @goodsong81 in https://github.com/intel-sandbox/openvino_xai/pull/64
 * Add CHANGLOG.md by @goodsong81 in https://github.com/intel-sandbox/openvino_xai/pull/65
+* Refactor names/folders/objects for better verbosity by @GalyaZalesskaya in https://github.com/openvinotoolkit/openvino_xai/pull/5
 
 ### New Contributors
 
97 changes: 34 additions & 63 deletions examples/run_classification.py
@@ -12,13 +12,7 @@
 
 import openvino_xai as xai
 from openvino_xai.common.utils import logger
-from openvino_xai.explainer.parameters import (
-    ExplainMode,
-    ExplanationParameters,
-    TargetExplainGroup,
-    VisualizationParameters,
-)
-from openvino_xai.inserter.parameters import ClassificationInsertionParameters
+from openvino_xai.explainer.mode import ExplainMode, TargetExplainGroup
 
 
 def get_argument_parser():
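The examples in this file pass a `preprocess_fn` into the `Explainer`; its definition sits outside the diff. For readers trying the new API on its own, a minimal sketch of such a callable, assuming an NHWC model input of 224x224 (the actual shape depends on the model):

```python
import cv2
import numpy as np


def preprocess_fn(image: np.ndarray) -> np.ndarray:
    """Resize to the assumed 224x224 NHWC model input and add a batch axis."""
    resized = cv2.resize(image, (224, 224))  # adjust to the real input shape
    return np.expand_dims(resized, 0)
```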
@@ -61,14 +55,14 @@ def explain_auto(args):
 
     # Prepare input image and explanation parameters, can be different for each explain call
     image = cv2.imread(args.image_path)
-    explanation_parameters = ExplanationParameters(
+
+    # Generate explanation
+    explanation = explainer(
+        image,
         target_explain_group=TargetExplainGroup.CUSTOM,  # CUSTOM list of classes to explain, also ALL possible
         target_explain_labels=[11, 14],  # target classes to explain
     )
-
-    # Generate explanation
-    explanation = explainer(image, explanation_parameters)
 
     logger.info(
         f"explain_auto: Generated {len(explanation.saliency_map)} classification "
         f"saliency maps of layout {explanation.layout} with shape {explanation.shape}."
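The net effect of the hunk above: `ExplanationParameters` is gone, and target selection is passed straight into the explainer call. A minimal sketch of the new calling pattern in AUTO mode, with placeholder paths and the `preprocess_fn` sketched earlier:

```python
import cv2
import openvino as ov

import openvino_xai as xai
from openvino_xai.explainer.mode import TargetExplainGroup

# Placeholder model path; any OpenVINO IR classification model would do.
model = ov.Core().read_model("model.xml")
explainer = xai.Explainer(
    model=model,
    task=xai.Task.CLASSIFICATION,
    preprocess_fn=preprocess_fn,  # sketched above
)

image = cv2.imread("image.jpg")  # placeholder path
explanation = explainer(
    image,
    target_explain_group=TargetExplainGroup.CUSTOM,
    target_explain_labels=[11, 14],
)
print(len(explanation.saliency_map))
```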
@@ -83,44 +77,39 @@ def explain_auto(args):
 
 def explain_white_box(args):
     """
     Advanced use case using ExplainMode.WHITEBOX.
-    insertion_parameters are provided to further configure the white-box method.
+    Insertion parameters (e.g. target_layer) are provided to further configure the white-box method (optional).
     """
 
     # Create ov.Model
     model: ov.Model
     model = ov.Core().read_model(args.model_path)
 
-    # Optional - define insertion parameters
-    insertion_parameters = ClassificationInsertionParameters(
-        # target_layer="last_conv_node_name",  # target_layer - node after which XAI branch will be inserted
-        target_layer="/backbone/conv/conv.2/Div",  # OTX mnet_v3
-        # target_layer="/backbone/features/final_block/activate/Mul",  # OTX effnet
-        embed_scaling=True,  # True by default. If set to True, saliency map scale (0 ~ 255) operation is embedded in the model
-        explain_method=xai.Method.RECIPROCAM,  # ReciproCAM is the default XAI method for CNNs
-    )
-
     # Create explainer object
     explainer = xai.Explainer(
         model=model,
         task=xai.Task.CLASSIFICATION,
         preprocess_fn=preprocess_fn,
         explain_mode=ExplainMode.WHITEBOX,  # defaults to AUTO
-        insertion_parameters=insertion_parameters,
+        explain_method=xai.Method.RECIPROCAM,  # ReciproCAM is the default XAI method for CNNs
+        # target_layer="last_conv_node_name",  # target_layer - node after which XAI branch will be inserted
+        target_layer="/backbone/conv/conv.2/Div",  # OTX mnet_v3
+        # target_layer="/backbone/features/final_block/activate/Mul",  # OTX effnet
+        embed_scaling=True,  # True by default. If set to True, saliency map scale (0 ~ 255) operation is embedded in the model
    )
 
     # Prepare input image and explanation parameters, can be different for each explain call
     image = cv2.imread(args.image_path)
     voc_labels = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
                   'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
-    explanation_parameters = ExplanationParameters(
+
+    # Generate explanation
+    explanation = explainer(
+        image,
         target_explain_group=TargetExplainGroup.CUSTOM,  # CUSTOM list of classes to explain, also ALL possible
         target_explain_labels=[11, 14],  # target classes to explain, also ['dog', 'person'] is a valid input
         label_names=voc_labels,  # optional names
-        visualization_parameters=VisualizationParameters(overlay=True)
-    )
-
-    # Generate explanation
-    explanation = explainer(image, explanation_parameters)
+        overlay=True,
+    )
 
     logger.info(
         f"explain_white_box: Generated {len(explanation.saliency_map)} classification "
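With `overlay=True`, each saliency map is blended with the input image. A hedged sketch for persisting the results, assuming `explanation.saliency_map` behaves as a mapping from target id to a `uint8` array (which the `len()` call in the logging above suggests):

```python
import cv2

# Reuses `explanation` from explain_white_box; the output file names are hypothetical.
for target_id, saliency in explanation.saliency_map.items():
    cv2.imwrite(f"saliency_map_{target_id}.png", saliency)
```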
@@ -156,17 +145,14 @@ def explain_black_box(args):
     image = cv2.imread(args.image_path)
     voc_labels = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',
                   'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
-    explanation_parameters = ExplanationParameters(
-        target_explain_group=TargetExplainGroup.CUSTOM,  # CUSTOM list of classes to explain, also ALL possible
-        target_explain_labels=['dog', 'person'],  # target classes to explain, also [11, 14] possible
-        label_names=voc_labels,  # optional names
-        visualization_parameters=VisualizationParameters(overlay=True)
-    )
 
     # Generate explanation
     explanation = explainer(
         image,
-        explanation_parameters,
+        target_explain_group=TargetExplainGroup.CUSTOM,  # CUSTOM list of classes to explain, also ALL possible
+        target_explain_labels=['dog', 'person'],  # target classes to explain, also [11, 14] possible
+        label_names=voc_labels,  # optional names
+        overlay=True,
         num_masks=1000,  # kwargs of the RISE algo
     )
 
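`num_masks` is forwarded to the RISE algorithm: more random masks give smoother, more stable saliency maps at proportionally higher inference cost, since RISE runs one inference per mask. A sketch of that trade-off, reusing `explainer`, `image`, and `voc_labels` from the example above:

```python
# Fewer masks for a quick preview, more for a final-quality map.
for num_masks in (1000, 5000):
    explanation = explainer(
        image,
        target_explain_group=TargetExplainGroup.CUSTOM,
        target_explain_labels=["dog", "person"],
        label_names=voc_labels,
        overlay=True,
        num_masks=num_masks,  # runtime scales roughly linearly with this value
    )
```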
@@ -197,11 +183,6 @@ def explain_white_box_multiple_images(args):
         preprocess_fn=preprocess_fn,
     )
 
-    explanation_parameters = ExplanationParameters(
-        target_explain_group=TargetExplainGroup.CUSTOM,  # CUSTOM list of classes to explain, also ALL possible
-        target_explain_labels=[14],  # target classes to explain
-    )
-
     # Create list of images
     img_data_formats = (".jpg", ".jpeg", ".gif", ".bmp", ".tif", ".tiff", ".png")
     if args.image_path.lower().endswith(img_data_formats):
@@ -216,7 +197,7 @@
 
     # Generate explanation
     images = [cv2.imread(image_path) for image_path in img_files]
-    explanation = [explainer(image, explanation_parameters) for image in images]
+    explanation = [explainer(image, target_explain_group=TargetExplainGroup.CUSTOM, target_explain_labels=[14]) for image in images]
 
     logger.info(
         f"explain_white_box_multiple_images: Generated {len(explanation)} explanations "
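Because per-call keyword arguments replaced the shared `ExplanationParameters` object, the same `explainer` can now vary its targets from image to image. A sketch, reusing the `img_files` list built above (the alternating targets are purely illustrative):

```python
import cv2

# Explain 'person' (index 14 in VOC ordering) for even frames, 'dog' (11) for odd.
explanations = []
for i, image_path in enumerate(img_files):
    image = cv2.imread(image_path)
    targets = [14] if i % 2 == 0 else [11]
    explanations.append(
        explainer(image, target_explain_group=TargetExplainGroup.CUSTOM, target_explain_labels=targets)
    )
```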
Expand All @@ -236,32 +217,27 @@ def explain_white_box_vit(args):
model: ov.Model
model = ov.Core().read_model(args.model_path)

# Optional - define insertion parameters
insertion_parameters = ClassificationInsertionParameters(
# target_layer="/layers.10/ffn/Add", # OTX deit-tiny
# target_layer="/blocks/blocks.10/Add_1", # timm vit_base_patch8_224.augreg_in21k_ft_in1k
explain_method=xai.Method.VITRECIPROCAM,
)

# Create explainer object
explainer = xai.Explainer(
model=model,
task=xai.Task.CLASSIFICATION,
preprocess_fn=preprocess_fn,
explain_mode=ExplainMode.WHITEBOX, # defaults to AUTO
insertion_parameters=insertion_parameters,
explain_method=xai.Method.VITRECIPROCAM,
# target_layer="/layers.10/ffn/Add", # OTX deit-tiny
# target_layer="/blocks/blocks.10/Add_1", # timm vit_base_patch8_224.augreg_in21k_ft_in1k
)

# Prepare input image and explanation parameters, can be different for each explain call
image = cv2.imread(args.image_path)
explanation_parameters = ExplanationParameters(

# Generate explanation
explanation = explainer(
image,
target_explain_group=TargetExplainGroup.CUSTOM, # CUSTOM list of classes to explain, also ALL possible
target_explain_labels=[0, 1, 2, 3, 4], # target classes to explain
)

# Generate explanation
explanation = explainer(image, explanation_parameters)

logger.info(
f"explain_white_box_vit: Generated {len(explanation.saliency_map)} classification "
f"saliency maps of layout {explanation.layout} with shape {explanation.shape}."
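The only differences versus the CNN case are the method name and the (commented) target layers. A small sketch of selecting the method by architecture; the mapping is inferred from the two examples in this file, not an API contract:

```python
import openvino_xai as xai


def pick_explain_method(is_vision_transformer: bool) -> xai.Method:
    # ReciproCAM for CNNs, its ViT variant for transformer backbones,
    # mirroring explain_white_box and explain_white_box_vit above.
    return xai.Method.VITRECIPROCAM if is_vision_transformer else xai.Method.RECIPROCAM
```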
@@ -298,26 +274,21 @@ def insert_xai(args):
 def insert_xai_w_params(args):
     """
     White-box scenario.
-    Insertion of the XAI branch into the IR with insertion parameters, thus IR has additional 'saliency_map' output.
+    Insertion of the XAI branch into the IR with insertion parameters (e.g. target_layer), thus, IR has additional 'saliency_map' output.
     """
 
     # Create ov.Model
     model: ov.Model
     model = ov.Core().read_model(args.model_path)
 
-    # Define insertion parameters
-    insertion_parameters = ClassificationInsertionParameters(
-        target_layer="/backbone/conv/conv.2/Div",  # OTX mnet_v3
-        # target_layer="/backbone/features/final_block/activate/Mul",  # OTX effnet
-        embed_scaling=True,
-        explain_method=xai.Method.RECIPROCAM,
-    )
-
     # insert XAI branch
     model_xai = xai.insert_xai(
         model,
         task=xai.Task.CLASSIFICATION,
-        insertion_parameters=insertion_parameters,
+        explain_method=xai.Method.RECIPROCAM,
+        target_layer="/backbone/conv/conv.2/Div",  # OTX mnet_v3
+        # target_layer="/backbone/features/final_block/activate/Mul",  # OTX effnet
+        embed_scaling=True,
    )
 
     logger.info("insert_xai_w_params: XAI branch inserted into IR with parameters.")
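After insertion, the returned `ov.Model` carries the extra `saliency_map` output and can be serialized like any other IR. A sketch using standard OpenVINO APIs (the output path is a placeholder):

```python
import openvino as ov

# `model_xai` comes from xai.insert_xai(...) above.
output_names = {name for out in model_xai.outputs for name in out.get_names()}
assert "saliency_map" in output_names

ov.save_model(model_xai, "model_with_xai.xml")  # placeholder path
```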
24 changes: 7 additions & 17 deletions examples/run_detection.py
@@ -11,12 +11,7 @@
 
 import openvino_xai as xai
 from openvino_xai.common.utils import logger
-from openvino_xai.explainer.parameters import (
-    ExplainMode,
-    ExplanationParameters,
-    TargetExplainGroup,
-)
-from openvino_xai.inserter.parameters import DetectionInsertionParameters
+from openvino_xai.explainer.mode import ExplainMode, TargetExplainGroup
 
 
 def get_argument_parser():
@@ -62,32 +57,27 @@ def main(argv):
     # "/bbox_head/atss_cls_3/Conv/WithoutBiases",
     # "/bbox_head/atss_cls_4/Conv/WithoutBiases",
     # ]
-    insertion_parameters = DetectionInsertionParameters(
-        target_layer=cls_head_output_node_names,
-        # num_anchors=[1, 1, 1, 1, 1],
-        saliency_map_size=(23, 23),  # Optional
-        explain_method=xai.Method.DETCLASSPROBABILITYMAP,  # Optional
-    )
-
     # Create explainer object
     explainer = xai.Explainer(
         model=model,
         task=xai.Task.DETECTION,
         preprocess_fn=preprocess_fn,
         explain_mode=ExplainMode.WHITEBOX,  # defaults to AUTO
-        insertion_parameters=insertion_parameters,
+        target_layer=cls_head_output_node_names,
+        saliency_map_size=(23, 23),  # Optional
    )
 
     # Prepare input image and explanation parameters, can be different for each explain call
     image = cv2.imread(args.image_path)
-    explanation_parameters = ExplanationParameters(
+
+    # Generate explanation
+    explanation = explainer(
+        image,
         target_explain_group=TargetExplainGroup.CUSTOM,  # CUSTOM list of classes to explain, also ALL possible
         target_explain_labels=[0, 1, 2, 3, 4],  # target classes to explain
     )
-
-    # Generate explanation
-    explanation = explainer(image, explanation_parameters)
 
     logger.info(
         f"Generated {len(explanation.saliency_map)} detection "
         f"saliency maps of layout {explanation.layout} with shape {explanation.shape}."
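The `(23, 23)` saliency maps are much smaller than the input frame, so visualizing them requires upscaling. A hedged sketch, assuming one single-channel `uint8` map per target id (consistent with the layout logging above); it reuses `explanation` and `image` from `main`, and the output name is hypothetical:

```python
import cv2

for target_id, saliency in explanation.saliency_map.items():
    # Upscale the 23x23 map to the frame size, colorize, and blend 50/50.
    resized = cv2.resize(saliency, (image.shape[1], image.shape[0]))
    heatmap = cv2.applyColorMap(resized, cv2.COLORMAP_JET)
    blended = cv2.addWeighted(image, 0.5, heatmap, 0.5, 0)
    cv2.imwrite(f"detection_saliency_{target_id}.jpg", blended)
```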