Commit 5fb28d5
fix n_last_diffusion_steps_to_consider_for_attributions=0

JoaoLages committed Sep 10, 2022
1 parent 659ef21 commit 5fb28d5

Showing 2 changed files with 42 additions and 41 deletions.
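Judging from the commit title and the diff below, this release fixes a spurious NotImplementedError: with n_last_diffusion_steps_to_consider_for_attributions=0 (which presumably leaves calculate_attributions False), the old combined condition fell through to its else branch and raised even though no attributions had been requested. The fix nests the attribution_method check inside the calculate_attributions check, so skipping attributions becomes a no-op. A minimal sketch of the control-flow change follows the diffs below.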
2 changes: 1 addition & 1 deletion setup.py

@@ -9,7 +9,7 @@

 setup(
     name='diffusers-interpret',
-    version='0.3.0',
+    version='0.3.1',
     description='diffusers-interpret: model explainability for 🤗 Diffusers',
     long_description=long_description,
     long_description_content_type='text/markdown',
81 changes: 41 additions & 40 deletions src/diffusers_interpret/explainer.py

@@ -148,50 +148,51 @@ def __call__(
         # Get primary attribution scores
         output.token_attributions = None
         output.normalized_token_attributions = None
-        if calculate_attributions and attribution_method == 'grad_x_input':
+        if calculate_attributions:
+            if attribution_method == 'grad_x_input':

-            if self.verbose:
-                print("Calculating token attributions... ", end='')
+                if self.verbose:
+                    print("Calculating token attributions... ", end='')

-            token_attributions = gradient_x_inputs_attribution(
-                pred_logits=output.image, input_embeds=text_embeddings,
-                explanation_2d_bounding_box=explanation_2d_bounding_box
-            )
-            token_attributions = token_attributions.detach().cpu().numpy()
-
-            # remove special tokens
-            assert len(token_attributions) == len(tokens)
-            output.token_attributions = []
-            output.normalized_token_attributions = []
-            for image_token_attributions, image_tokens in zip(token_attributions, tokens):
-                assert len(image_token_attributions) == len(image_tokens)
-
-                # Add token attributions
-                output.token_attributions.append([])
-                for attr, token in zip(image_token_attributions, image_tokens):
-                    if consider_special_tokens or token not in self.special_tokens_attributes:
-
-                        if clean_token_prefixes_and_suffixes:
-                            token = clean_token_from_prefixes_and_suffixes(token)
-
-                        output.token_attributions[-1].append(
-                            (token, attr)
-                        )
-
-                # Add normalized
-                total = sum([attr for _, attr in output.token_attributions[-1]])
-                output.normalized_token_attributions.append(
-                    [
-                        (token, round(100 * attr / total, 3))
-                        for token, attr in output.token_attributions[-1]
-                    ]
-                )
+                token_attributions = gradient_x_inputs_attribution(
+                    pred_logits=output.image, input_embeds=text_embeddings,
+                    explanation_2d_bounding_box=explanation_2d_bounding_box
+                )
+                token_attributions = token_attributions.detach().cpu().numpy()
+
+                # remove special tokens
+                assert len(token_attributions) == len(tokens)
+                output.token_attributions = []
+                output.normalized_token_attributions = []
+                for image_token_attributions, image_tokens in zip(token_attributions, tokens):
+                    assert len(image_token_attributions) == len(image_tokens)
+
+                    # Add token attributions
+                    output.token_attributions.append([])
+                    for attr, token in zip(image_token_attributions, image_tokens):
+                        if consider_special_tokens or token not in self.special_tokens_attributes:
+
+                            if clean_token_prefixes_and_suffixes:
+                                token = clean_token_from_prefixes_and_suffixes(token)
+
+                            output.token_attributions[-1].append(
+                                (token, attr)
+                            )
+
+                    # Add normalized
+                    total = sum([attr for _, attr in output.token_attributions[-1]])
+                    output.normalized_token_attributions.append(
+                        [
+                            (token, round(100 * attr / total, 3))
+                            for token, attr in output.token_attributions[-1]
+                        ]
+                    )

-            if self.verbose:
-                print("Done!")
+                if self.verbose:
+                    print("Done!")

-        else:
-            raise NotImplementedError("Only `attribution_method='grad_x_input'` is implemented for now")
+            else:
+                raise NotImplementedError("Only `attribution_method='grad_x_input'` is implemented for now")

         if batch_size == 1:
             # squash batch dimension
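A minimal, runnable sketch of the behavioural change, in plain Python. old_call and new_call are hypothetical stand-ins for the explainer's __call__, not the library's API:

# Sketch of the control-flow fix in commit 5fb28d5 (standalone;
# old_call/new_call are hypothetical stand-ins, not diffusers-interpret code).

def old_call(calculate_attributions: bool, attribution_method: str) -> str:
    # Pre-0.3.1: one combined condition, so the else branch also fired
    # when calculate_attributions was False.
    if calculate_attributions and attribution_method == 'grad_x_input':
        return "attributions computed"
    else:
        raise NotImplementedError("Only `attribution_method='grad_x_input'` is implemented for now")

def new_call(calculate_attributions: bool, attribution_method: str) -> str:
    # 0.3.1: the method check is nested, so disabling attributions is a no-op.
    if calculate_attributions:
        if attribution_method == 'grad_x_input':
            return "attributions computed"
        else:
            raise NotImplementedError("Only `attribution_method='grad_x_input'` is implemented for now")
    return "attributions skipped"

try:
    old_call(False, 'grad_x_input')
except NotImplementedError:
    print("old: raised even though attributions were disabled")

print("new:", new_call(False, 'grad_x_input'))  # -> "attributions skipped"

Both versions still raise for an unsupported attribution_method when attributions are requested; only the disabled case changes.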
