From 5275612118b605af52a32ff3b3b8496b47d6e4d1 Mon Sep 17 00:00:00 2001 From: Larissa Heinrich Date: Mon, 26 Aug 2024 19:05:13 +0000 Subject: [PATCH 01/11] misc corrections - html fixes - plotting updated attributions - plotting updated attributions in color - don't let students choose style size - add checkpoint --- solution.py | 56 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 37 insertions(+), 19 deletions(-) diff --git a/solution.py b/solution.py index e8c7455..c85b685 100644 --- a/solution.py +++ b/solution.py @@ -53,16 +53,16 @@ ax.axis("off") # %% [markdown] -# We have pre-traiend a classifier for you on this dataset. It is the same architecture classifier as you used in the Failure Modes exercise: a `DenseModel`. +# During the setup you have pre-traiend a classifier on this dataset. It is the same architecture classifier as you used in the Failure Modes exercise: a `DenseModel`. # Let's load that classifier now! # %% [markdown] #

Task 1.1: Load the classifier

-# We have written a slightly more general version of the `DenseModel` that you used in the previous exercise. Ours requires two inputs:
-# - `input_shape`: the shape of the input images, as a tuple
-# - `num_classes`: the number of classes in the dataset
+# We have written a slightly more general version of the DenseModel that you used in the previous exercise. Ours requires two inputs:
+#
+# • input_shape: the shape of the input images, as a tuple
+# • num_classes: the number of classes in the dataset
 #
 # Create a dense model with the right inputs and load the weights from the checkpoint.
    # %% tags=["task"] import torch from classifier.model import DenseModel @@ -203,7 +203,7 @@ def visualize_attribution(attribution, original_image): # %% [markdown] # -# The attributions are shown as a heatmap. The brighter the pixel, the more important this attribution method thinks that it is. +# The attributions are shown as a heatmap. The closer to 1 the pixel value, the more important this attribution method thinks that it is. # As you can see, it is pretty good at recognizing the number within the image. # As we know, however, it is not the digit itself that is important for the classification, it is the color! # Although the method is picking up really well on the region of interest, it would be difficult to conclude from this that it is the color that matters. @@ -234,7 +234,7 @@ def visualize_color_attribution(attribution, original_image): # %% [markdown] # We get some better clues when looking at the attributions in color. -# The highlighting doesn't just happen in the region with number, but also seems to hapen in a channel that matches the color of the image. +# The highlighting doesn't just happen in the region with number, but also seems to happen in a channel that matches the color of the image. # Just based on this, however, we don't get much more information than we got from the images themselves. # # If we didn't know in advance, it is unclear whether the color or the number is the most important feature for the classifier. @@ -270,11 +270,12 @@ def visualize_color_attribution(attribution, original_image): random_baselines = ... # TODO Change # Generate the attributions attributions_random = integrated_gradients.attribute(...) # TODO Change +attributions_random = attributions_random.cpu().numpy() # Plotting -for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()): +for attr, im, lbl in zip(attributions_random, x.cpu().numpy(), y.cpu().numpy()): print(f"Class {lbl}") - visualize_attribution(attr, im) + visualize_color_attribution(attr, im) # %% tags=["solution"] ######################### @@ -286,9 +287,9 @@ def visualize_color_attribution(attribution, original_image): attributions_random = integrated_gradients.attribute( x, target=y, baselines=random_baselines ) - +attributions_random = attributions_random.cpu().numpy() # Plotting -for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()): +for attr, im, lbl in zip(attributions_random, x.cpu().numpy(), y.cpu().numpy()): print(f"Class {lbl}") visualize_color_attribution(attr, im) @@ -306,8 +307,9 @@ def visualize_color_attribution(attribution, original_image): # Generate the attributions attributions_blurred = integrated_gradients.attribute(...) # TODO Fill +attributions_blurred = attributions_blurred.cpu().numpy() # Plotting -for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()): +for attr, im, lbl in zip(attributions_blurred, x.cpu().numpy(), y.cpu().numpy()): print(f"Class {lbl}") visualize_color_attribution(attr, im) @@ -324,8 +326,10 @@ def visualize_color_attribution(attribution, original_image): x, target=y, baselines=blurred_baselines ) +attributions_blurred = attributions_blurred.cpu().numpy() + # Plotting -for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()): +for attr, im, lbl in zip(attributions_blurred, x.cpu().numpy(), y.cpu().numpy()): print(f"Class {lbl}") visualize_color_attribution(attr, im) @@ -349,7 +353,7 @@ def visualize_color_attribution(attribution, original_image): # %% [markdown] #
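# Beyond swapping baselines, captum ships several other attribution methods that share the
# same `attribute(...)` interface. Below is a hedged sketch only: it assumes the
# `captum.attr.Saliency` and `captum.attr.NoiseTunnel` APIs and reuses the `model`, `x`, `y`,
# and `visualize_color_attribution` defined above; it is one possible starting point, not the
# exercise's official solution.

# %%
from captum.attr import NoiseTunnel, Saliency

# Plain saliency: gradient of the target class score with respect to the input pixels.
saliency = Saliency(model)
attributions_saliency = saliency.attribute(x, target=y).detach().cpu().numpy()

# SmoothGrad: average the saliency over several noisy copies of the input.
smoothgrad = NoiseTunnel(Saliency(model))
attributions_smoothgrad = (
    smoothgrad.attribute(x, target=y, nt_type="smoothgrad", nt_samples=25, stdevs=0.1)
    .detach()
    .cpu()
    .numpy()
)

for attr, im, lbl in zip(attributions_smoothgrad, x.cpu().numpy(), y.cpu().numpy()):
    print(f"Class {lbl}")
    visualize_color_attribution(attr, im)

# %% [markdown]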

    Checkpoint 2

    -# Let us know on the exercise chat when you've reached this point! +# Put up your green sticky note when you've reached this point! # # At this point we have: # @@ -371,9 +375,9 @@ def visualize_color_attribution(attribution, original_image): # **What is a counterfactual?** # # You've learned about adversarial examples in the lecture on failure modes. These are the imperceptible or noisy changes to an image that drastically changes a classifier's opinion. -# Counterfactual explanations are the useful cousins of adversarial examples. They are *perceptible* and *informative* changes to an image that changes a classifier's opinion. +# Counterfactual explanations are the useful cousins of adversarial examples. They are *perceptible* and *informative* changes to an image that change a classifier's opinion. # -# In the image below you can see the difference between the two. In the first column are MNIST images along with their classifictaions, and in the second column are counterfactual explanations to *change* that class. You can see that in both cases a human being would (hopefully) agree with the new classification. By comparing the two columns, we can therefore begin to define what makes each digit special. +# In the image below you can see the difference between the two. In the first column are (non-color) MNIST images along with their classifications, and in the second column are counterfactual explanations to *change* that class. You can see that in both cases a human being would (hopefully) agree with the new classification. By comparing the two columns, we can therefore begin to define what makes each digit special. # # In contrast, the third and fourth columns show an MNIST image and a corresponding adversarial example. Here the network returns a prediction that most human beings (who aren't being facetious) would strongly disagree with. # @@ -429,7 +433,7 @@ def forward(self, x, y): # # Given the Generator structure above, fill in the missing parts for the unet and the style mapping. # %% tags=["task"] -style_size = ... # TODO choose a size for the style space +style_size = 3 unet_depth = ... # TODO Choose a depth for the UNet style_encoder = DenseModel( input_shape=..., num_classes=... # How big is the style space? @@ -447,7 +451,7 @@ def forward(self, x, y): # %% [markdown] tags=[] #
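# An optional sanity check, sketched under assumptions: the colored-MNIST images used here are
# taken to be 3x28x28, and `DenseModel` takes `input_shape` and `num_classes` as above; adjust
# if your data differs. Once the TODOs in the cell above are filled in, the style encoder
# should map a batch of images to one style vector of length `style_size` per image.

# %%
dummy_batch = torch.zeros(2, 3, 28, 28)
print(style_encoder(dummy_batch).shape)  # expected: torch.Size([2, style_size])

# %% [markdown] tags=[]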

    Hyper-parameter choices

    #
      -#
-# • Are any of the hyperparameters you choose above constrained in some way?
+# • Are any of the hyperparameters above constrained in some way?
 # • What would happen if you chose a depth of 10 for the UNet?
 # • Is there a minimum size for the style space? Why or why not?
    @@ -556,6 +560,20 @@ def copy_parameters(source_model, target_model): generator_ema = Generator(deepcopy(unet), style_encoder=deepcopy(style_encoder)) generator_ema = generator_ema.to(device) +# %% [markdown] +#

    Checkpoint 3

    +# Put up your green sticky note when you've reached this point! +# +# At this point we have: +# +# - Loaded a classifier that classifies MNIST-like images by color, but we don't know how! +# - Tried applying Integrated Gradients to find out what the classifier is looking at - with little success. +# - Discovered the effect of changing the baseline on the output of integrated gradients. +# - Defined the hyperparameters for a StarGAN to create counterfactual images. +# +# Next up, we will define the training loop for the StarGAN. +#
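# Before moving on, here is a hedged sketch of what an exponential-moving-average (EMA)
# update can look like for `generator_ema`. The exercise's own `copy_parameters` helper and
# EMA schedule may differ, and the decay value 0.999 below is an assumption rather than a
# value taken from the exercise code.

# %%
def ema_update(source_model, target_model, beta=0.999):
    # target <- beta * target + (1 - beta) * source, parameter by parameter
    with torch.no_grad():
        for src, tgt in zip(source_model.parameters(), target_model.parameters()):
            tgt.mul_(beta).add_(src, alpha=1 - beta)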
    + # %% [markdown] tags=[] #

    Task 3.3: Training!

 # You were given several different options in the training code below. In each case, one of the options will work, and the other will not.

@@ -774,7 +792,7 @@ def copy_parameters(source_model, target_model):
 # The same method can be used to create a StarGAN with different basic elements.
 # For example, you can change the architecture of the generators, or of the discriminator to better fit your data in the future.
 #
-# You know the drill... let us know on the exercise chat when you have arrived here!
+# You know the drill... put up your green sticky note when you have arrived here!
 #
    # %% [markdown] tags=[] From b7c97046fd47793b466bd2a6aae68a6d0549f609 Mon Sep 17 00:00:00 2001 From: neptunes5thmoon Date: Mon, 26 Aug 2024 19:06:42 +0000 Subject: [PATCH 02/11] Commit from GitHub Actions (Build Notebooks) --- exercise.ipynb | 257 ++++++++++++++++++++++++++----------------------- solution.ipynb | 257 ++++++++++++++++++++++++++----------------------- 2 files changed, 278 insertions(+), 236 deletions(-) diff --git a/exercise.ipynb b/exercise.ipynb index 0ad1bfb..bde574d 100644 --- a/exercise.ipynb +++ b/exercise.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "0e557335", + "id": "eeffd47d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "8667a26e", + "id": "d9b65175", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "e36eca8a", + "id": "025f7566", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ffea56f9", + "id": "7f4771f3", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "55cab99a", + "id": "9d60bf35", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1fe21cf9", + "id": "4acd91e9", "metadata": {}, "outputs": [], "source": [ @@ -102,35 +102,35 @@ }, { "cell_type": "markdown", - "id": "610b5cdb", + "id": "9e70adfb", "metadata": { "lines_to_next_cell": 0 }, "source": [ - "We have pre-traiend a classifier for you on this dataset. It is the same architecture classifier as you used in the Failure Modes exercise: a `DenseModel`.\n", + "During the setup you have pre-traiend a classifier on this dataset. It is the same architecture classifier as you used in the Failure Modes exercise: a `DenseModel`.\n", "Let's load that classifier now!" ] }, { "cell_type": "markdown", - "id": "275b9fc0", + "id": "aa1a58cf", "metadata": { "lines_to_next_cell": 0 }, "source": [ "

    Task 1.1: Load the classifier

    \n", - "We have written a slightly more general version of the `DenseModel` that you used in the previous exercise. Ours requires two inputs:\n", - "- `input_shape`: the shape of the input images, as a tuple\n", - "- `num_classes`: the number of classes in the dataset\n", + "We have written a slightly more general version of the DenseModel that you used in the previous exercise. Ours requires two inputs:\n", + "
  • input_shape: the shape of the input images, as a tuple
  • \n", + "
  • num_classes: the number of classes in the dataset
  • \n", "\n", "Create a dense model with the right inputs and load the weights from the checkpoint.\n", - "
    " + "
    " ] }, { "cell_type": "code", "execution_count": null, - "id": "a6ce29c6", + "id": "fb0683a8", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -155,7 +155,7 @@ }, { "cell_type": "markdown", - "id": "3f2739bc", + "id": "a3326bfe", "metadata": { "lines_to_next_cell": 0 }, @@ -166,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9423e54b", + "id": "19354115", "metadata": {}, "outputs": [], "source": [ @@ -193,7 +193,7 @@ }, { "cell_type": "markdown", - "id": "cecb6c11", + "id": "ff293353", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -203,7 +203,7 @@ }, { "cell_type": "markdown", - "id": "c57a8a9d", + "id": "c8f3e86d", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -216,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a596affd", + "id": "c6071dca", "metadata": { "tags": [] }, @@ -234,7 +234,7 @@ }, { "cell_type": "markdown", - "id": "06db8b81", + "id": "7be4fc86", "metadata": { "tags": [] }, @@ -250,7 +250,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a8252672", + "id": "4766f414", "metadata": { "tags": [ "task" @@ -271,7 +271,7 @@ { "cell_type": "code", "execution_count": null, - "id": "115b94e7", + "id": "5395d4a2", "metadata": { "tags": [] }, @@ -284,7 +284,7 @@ }, { "cell_type": "markdown", - "id": "6e281067", + "id": "f75d2492", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -296,7 +296,7 @@ { "cell_type": "code", "execution_count": null, - "id": "07528ee6", + "id": "9d788363", "metadata": { "tags": [] }, @@ -324,7 +324,7 @@ { "cell_type": "code", "execution_count": null, - "id": "780d14de", + "id": "56d5e9f3", "metadata": { "tags": [] }, @@ -337,13 +337,13 @@ }, { "cell_type": "markdown", - "id": "33d9db66", + "id": "4c28aff0", "metadata": { "lines_to_next_cell": 2 }, "source": [ "\n", - "The attributions are shown as a heatmap. The brighter the pixel, the more important this attribution method thinks that it is.\n", + "The attributions are shown as a heatmap. The closer to 1 the pixel value, the more important this attribution method thinks that it is.\n", "As you can see, it is pretty good at recognizing the number within the image.\n", "As we know, however, it is not the digit itself that is important for the classification, it is the color!\n", "Although the method is picking up really well on the region of interest, it would be difficult to conclude from this that it is the color that matters." @@ -351,7 +351,7 @@ }, { "cell_type": "markdown", - "id": "81725a7f", + "id": "e871c574", "metadata": { "lines_to_next_cell": 0 }, @@ -364,7 +364,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cba802d4", + "id": "9bd13121", "metadata": {}, "outputs": [], "source": [ @@ -389,13 +389,13 @@ }, { "cell_type": "markdown", - "id": "7254eab3", + "id": "e302a5ca", "metadata": { "lines_to_next_cell": 0 }, "source": [ "We get some better clues when looking at the attributions in color.\n", - "The highlighting doesn't just happen in the region with number, but also seems to hapen in a channel that matches the color of the image.\n", + "The highlighting doesn't just happen in the region with number, but also seems to happen in a channel that matches the color of the image.\n", "Just based on this, however, we don't get much more information than we got from the images themselves.\n", "\n", "If we didn't know in advance, it is unclear whether the color or the number is the most important feature for the classifier." 
@@ -403,7 +403,7 @@ }, { "cell_type": "markdown", - "id": "2fd14add", + "id": "844f37f0", "metadata": {}, "source": [ "\n", @@ -429,7 +429,7 @@ }, { "cell_type": "markdown", - "id": "9e88a188", + "id": "d6e8e759", "metadata": {}, "source": [ "

    Task 2.3: Use random noise as a baseline

    \n", @@ -441,7 +441,7 @@ { "cell_type": "code", "execution_count": null, - "id": "94ae54d4", + "id": "74344ee8", "metadata": { "tags": [ "task" @@ -453,16 +453,17 @@ "random_baselines = ... # TODO Change\n", "# Generate the attributions\n", "attributions_random = integrated_gradients.attribute(...) # TODO Change\n", + "attributions_random = attributions_random.cpu().numpy()\n", "\n", "# Plotting\n", - "for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()):\n", + "for attr, im, lbl in zip(attributions_random, x.cpu().numpy(), y.cpu().numpy()):\n", " print(f\"Class {lbl}\")\n", - " visualize_attribution(attr, im)" + " visualize_color_attribution(attr, im)" ] }, { "cell_type": "markdown", - "id": "8d7fbe0e", + "id": "6d560996", "metadata": { "tags": [] }, @@ -476,7 +477,7 @@ { "cell_type": "code", "execution_count": null, - "id": "438f77b4", + "id": "e15c02f7", "metadata": { "tags": [ "task" @@ -491,15 +492,16 @@ "# Generate the attributions\n", "attributions_blurred = integrated_gradients.attribute(...) # TODO Fill\n", "\n", + "attributions_blurred = attributions_blurred.cpu().numpy()\n", "# Plotting\n", - "for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()):\n", + "for attr, im, lbl in zip(attributions_blurred, x.cpu().numpy(), y.cpu().numpy()):\n", " print(f\"Class {lbl}\")\n", " visualize_color_attribution(attr, im)" ] }, { "cell_type": "markdown", - "id": "220cc456", + "id": "de810272", "metadata": { "tags": [] }, @@ -515,7 +517,7 @@ }, { "cell_type": "markdown", - "id": "83bbf9b4", + "id": "7a6f89dc", "metadata": {}, "source": [ "

    BONUS Task: Using different attributions.

    \n", @@ -529,11 +531,11 @@ }, { "cell_type": "markdown", - "id": "69d66239", + "id": "f3e293c5", "metadata": {}, "source": [ "

    Checkpoint 2

    \n", - "Let us know on the exercise chat when you've reached this point!\n", + "Put up your green sticky note when you've reached this point!\n", "\n", "At this point we have:\n", "\n", @@ -549,7 +551,7 @@ }, { "cell_type": "markdown", - "id": "0d20d9cf", + "id": "fb3048e1", "metadata": { "lines_to_next_cell": 0 }, @@ -562,9 +564,9 @@ "**What is a counterfactual?**\n", "\n", "You've learned about adversarial examples in the lecture on failure modes. These are the imperceptible or noisy changes to an image that drastically changes a classifier's opinion.\n", - "Counterfactual explanations are the useful cousins of adversarial examples. They are *perceptible* and *informative* changes to an image that changes a classifier's opinion.\n", + "Counterfactual explanations are the useful cousins of adversarial examples. They are *perceptible* and *informative* changes to an image that change a classifier's opinion.\n", "\n", - "In the image below you can see the difference between the two. In the first column are MNIST images along with their classifictaions, and in the second column are counterfactual explanations to *change* that class. You can see that in both cases a human being would (hopefully) agree with the new classification. By comparing the two columns, we can therefore begin to define what makes each digit special.\n", + "In the image below you can see the difference between the two. In the first column are (non-color) MNIST images along with their classifications, and in the second column are counterfactual explanations to *change* that class. You can see that in both cases a human being would (hopefully) agree with the new classification. By comparing the two columns, we can therefore begin to define what makes each digit special.\n", "\n", "In contrast, the third and fourth columns show an MNIST image and a corresponding adversarial example. Here the network returns a prediction that most human beings (who aren't being facetious) would strongly disagree with.\n", "\n", @@ -577,7 +579,7 @@ }, { "cell_type": "markdown", - "id": "18cda8e3", + "id": "88f77592", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -600,7 +602,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a4ea7f03", + "id": "6742eca7", "metadata": {}, "outputs": [], "source": [ @@ -632,7 +634,7 @@ }, { "cell_type": "markdown", - "id": "53e15fe2", + "id": "3ed703e3", "metadata": { "lines_to_next_cell": 0 }, @@ -647,7 +649,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35e552da", + "id": "57a7d702", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -656,7 +658,7 @@ }, "outputs": [], "source": [ - "style_size = ... # TODO choose a size for the style space\n", + "style_size = 3\n", "unet_depth = ... # TODO Choose a depth for the UNet\n", "style_encoder = DenseModel(\n", " input_shape=..., num_classes=... # How big is the style space?\n", @@ -668,14 +670,14 @@ }, { "cell_type": "markdown", - "id": "49a9a89d", + "id": "fdf97172", "metadata": { "tags": [] }, "source": [ "

    Hyper-parameter choices

    \n", "
      \n", - "
    • Are any of the hyperparameters you choose above constrained in some way?
    • \n", + "
    • Are any of the hyperparameters above constrained in some way?
    • \n", "
    • What would happen if you chose a depth of 10 for the UNet?
    • \n", "
    • Is there a minimum size for the style space? Why or why not?
    • \n", "
    " @@ -683,7 +685,7 @@ }, { "cell_type": "markdown", - "id": "fb9b4a59", + "id": "bcb08a78", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -700,7 +702,7 @@ { "cell_type": "code", "execution_count": null, - "id": "811aec8a", + "id": "349544f2", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -714,7 +716,7 @@ }, { "cell_type": "markdown", - "id": "ef8dd1e5", + "id": "fd541400", "metadata": { "lines_to_next_cell": 0 }, @@ -725,7 +727,7 @@ { "cell_type": "code", "execution_count": null, - "id": "069f2804", + "id": "1e91e416", "metadata": {}, "outputs": [], "source": [ @@ -735,7 +737,7 @@ }, { "cell_type": "markdown", - "id": "380256c9", + "id": "b0377825", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -753,7 +755,7 @@ { "cell_type": "code", "execution_count": null, - "id": "576200cc", + "id": "f4939a8d", "metadata": { "lines_to_next_cell": 0 }, @@ -765,7 +767,7 @@ }, { "cell_type": "markdown", - "id": "2885a3a0", + "id": "9a6ca642", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -784,7 +786,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9cc5eb23", + "id": "382fa57e", "metadata": {}, "outputs": [], "source": [ @@ -793,7 +795,7 @@ }, { "cell_type": "markdown", - "id": "5eb04071", + "id": "ccf3b275", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -809,7 +811,7 @@ { "cell_type": "code", "execution_count": null, - "id": "805dfbf2", + "id": "0a424199", "metadata": {}, "outputs": [], "source": [ @@ -818,7 +820,7 @@ }, { "cell_type": "markdown", - "id": "8fab1a3e", + "id": "fe335f99", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -830,7 +832,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dc899114", + "id": "977ce4c8", "metadata": {}, "outputs": [], "source": [ @@ -843,7 +845,7 @@ }, { "cell_type": "markdown", - "id": "7e13436e", + "id": "7bf8cb24", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -857,7 +859,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1cb60fb1", + "id": "0d5ee1e1", "metadata": {}, "outputs": [], "source": [ @@ -869,7 +871,7 @@ }, { "cell_type": "markdown", - "id": "e8441eda", + "id": "43c93243", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -889,7 +891,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6bb72378", + "id": "9a789ba4", "metadata": {}, "outputs": [], "source": [ @@ -913,7 +915,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af5d018a", + "id": "91b15866", "metadata": {}, "outputs": [], "source": [ @@ -923,7 +925,26 @@ }, { "cell_type": "markdown", - "id": "97aaeef6", + "id": "87d5a8a1", + "metadata": {}, + "source": [ + "

    Checkpoint 3

    \n", + "Put up your green sticky note when you've reached this point!\n", + "\n", + "At this point we have:\n", + "\n", + "- Loaded a classifier that classifies MNIST-like images by color, but we don't know how!\n", + "- Tried applying Integrated Gradients to find out what the classifier is looking at - with little success.\n", + "- Discovered the effect of changing the baseline on the output of integrated gradients.\n", + "- Defined the hyperparameters for a StarGAN to create counterfactual images.\n", + "\n", + "Next up, we will define the training loop for the StarGAN.\n", + "
    " + ] + }, + { + "cell_type": "markdown", + "id": "3a1e7dd9", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -945,7 +966,7 @@ }, { "cell_type": "markdown", - "id": "74570a78", + "id": "c6348be7", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -957,7 +978,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2b7a8a55", + "id": "f5ed4de5", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1068,7 +1089,7 @@ }, { "cell_type": "markdown", - "id": "41e0e738", + "id": "f6cd135b", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1080,7 +1101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b49fd330", + "id": "cdecbbbe", "metadata": {}, "outputs": [], "source": [ @@ -1096,7 +1117,7 @@ }, { "cell_type": "markdown", - "id": "77d4f778", + "id": "2c7d13e4", "metadata": { "tags": [] }, @@ -1111,7 +1132,7 @@ }, { "cell_type": "markdown", - "id": "2258df98", + "id": "689db81d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1123,7 +1144,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fa248f8b", + "id": "c3f514fa", "metadata": {}, "outputs": [], "source": [ @@ -1145,7 +1166,7 @@ }, { "cell_type": "markdown", - "id": "c2db79f3", + "id": "27ee28e5", "metadata": { "tags": [] }, @@ -1155,13 +1176,13 @@ "The same method can be used to create a StarGAN with different basic elements.\n", "For example, you can change the archictecture of the generators, or of the discriminator to better fit your data in the future.\n", "\n", - "You know the drill... let us know on the exercise chat when you have arrived here!\n", + "You know the drill... put up your green sticky note when you have arrived here!\n", "
    " ] }, { "cell_type": "markdown", - "id": "de5cb982", + "id": "e1c592af", "metadata": { "tags": [] }, @@ -1171,7 +1192,7 @@ }, { "cell_type": "markdown", - "id": "ac566b2e", + "id": "d2374be1", "metadata": { "tags": [] }, @@ -1188,7 +1209,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af5bbb66", + "id": "c038ee90", "metadata": { "title": "Loading the test dataset" }, @@ -1208,7 +1229,7 @@ }, { "cell_type": "markdown", - "id": "e5392251", + "id": "2dd7e283", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1220,7 +1241,7 @@ { "cell_type": "code", "execution_count": null, - "id": "02808bdf", + "id": "1910ed0f", "metadata": {}, "outputs": [], "source": [ @@ -1233,7 +1254,7 @@ }, { "cell_type": "markdown", - "id": "e2e7f289", + "id": "99188e3e", "metadata": { "lines_to_next_cell": 0 }, @@ -1243,7 +1264,7 @@ }, { "cell_type": "markdown", - "id": "9b936567", + "id": "fe2b4929", "metadata": { "lines_to_next_cell": 0 }, @@ -1261,7 +1282,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7e5d760d", + "id": "30cd9209", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1297,7 +1318,7 @@ }, { "cell_type": "markdown", - "id": "35dd9913", + "id": "ef56e941", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1309,7 +1330,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13fa9bfa", + "id": "1612f8ce", "metadata": {}, "outputs": [], "source": [ @@ -1322,7 +1343,7 @@ }, { "cell_type": "markdown", - "id": "449428b9", + "id": "2239beb9", "metadata": { "tags": [] }, @@ -1337,7 +1358,7 @@ }, { "cell_type": "markdown", - "id": "cb3d6985", + "id": "168352ff", "metadata": { "tags": [] }, @@ -1348,7 +1369,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3096db55", + "id": "f24cdc48", "metadata": {}, "outputs": [], "source": [ @@ -1362,7 +1383,7 @@ }, { "cell_type": "markdown", - "id": "19fd90a6", + "id": "b093ffc8", "metadata": { "tags": [] }, @@ -1377,7 +1398,7 @@ }, { "cell_type": "markdown", - "id": "e0967ec3", + "id": "be5c5a98", "metadata": { "lines_to_next_cell": 0 }, @@ -1392,7 +1413,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b3312425", + "id": "0891fcf7", "metadata": {}, "outputs": [], "source": [ @@ -1413,7 +1434,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42c617e2", + "id": "b080dfe1", "metadata": { "title": "Another visualization function" }, @@ -1442,7 +1463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3075be99", + "id": "e41e00c9", "metadata": { "lines_to_next_cell": 0 }, @@ -1458,7 +1479,7 @@ }, { "cell_type": "markdown", - "id": "d70f3ff4", + "id": "ac7678a7", "metadata": { "lines_to_next_cell": 0 }, @@ -1474,7 +1495,7 @@ }, { "cell_type": "markdown", - "id": "593ba2db", + "id": "3e06986c", "metadata": { "lines_to_next_cell": 0 }, @@ -1489,7 +1510,7 @@ }, { "cell_type": "markdown", - "id": "863c8dca", + "id": "7c9a99c4", "metadata": { "lines_to_next_cell": 0 }, @@ -1503,7 +1524,7 @@ { "cell_type": "code", "execution_count": null, - "id": "602db341", + "id": "b1affa61", "metadata": { "lines_to_next_cell": 0 }, @@ -1523,7 +1544,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cf1e78db", + "id": "4bf0e982", "metadata": { "lines_to_next_cell": 0 }, @@ -1559,7 +1580,7 @@ }, { "cell_type": "markdown", - "id": "6e37fd94", + "id": "7f8f6ca5", "metadata": { "lines_to_next_cell": 0 }, @@ -1573,7 +1594,7 @@ }, { "cell_type": "markdown", - "id": "5b55e69e", + "id": "537edaa0", "metadata": { "lines_to_next_cell": 0 }, @@ -1589,7 +1610,7 @@ }, { "cell_type": "markdown", - "id": 
"3862ab46", + "id": "c4dd727d", "metadata": { "lines_to_next_cell": 0 }, @@ -1605,7 +1626,7 @@ }, { "cell_type": "markdown", - "id": "bedeaf5c", + "id": "c5a5e2c0", "metadata": { "lines_to_next_cell": 0 }, @@ -1628,7 +1649,7 @@ }, { "cell_type": "markdown", - "id": "75e28b2c", + "id": "a9ede52b", "metadata": {}, "source": [ "

    Task 5.1: Explore the style space

    \n", @@ -1640,7 +1661,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d2ffe8e5", + "id": "af4a9d95", "metadata": {}, "outputs": [], "source": [ @@ -1675,7 +1696,7 @@ }, { "cell_type": "markdown", - "id": "559a66a2", + "id": "be742cb1", "metadata": { "lines_to_next_cell": 0 }, @@ -1691,7 +1712,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4c268ad8", + "id": "09032a46", "metadata": { "lines_to_next_cell": 0 }, @@ -1718,7 +1739,7 @@ }, { "cell_type": "markdown", - "id": "7e9d9184", + "id": "4d9a7277", "metadata": { "lines_to_next_cell": 0 }, @@ -1732,7 +1753,7 @@ }, { "cell_type": "markdown", - "id": "45005d21", + "id": "3016bf5c", "metadata": { "lines_to_next_cell": 0 }, @@ -1749,7 +1770,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b6e19ec8", + "id": "b47bc9a2", "metadata": {}, "outputs": [], "source": [ @@ -1772,7 +1793,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7aba6e43", + "id": "5434a70a", "metadata": { "lines_to_next_cell": 0 }, @@ -1781,7 +1802,7 @@ }, { "cell_type": "markdown", - "id": "c90115e2", + "id": "9d066df4", "metadata": {}, "source": [ "

    Questions

    \n", @@ -1793,7 +1814,7 @@ }, { "cell_type": "markdown", - "id": "6dd82862", + "id": "e4295029", "metadata": {}, "source": [ "

    Checkpoint 5

    \n", @@ -1811,7 +1832,7 @@ }, { "cell_type": "markdown", - "id": "b6389689", + "id": "3cd7976f", "metadata": { "lines_to_next_cell": 0 }, diff --git a/solution.ipynb b/solution.ipynb index d22fd9a..9b17243 100644 --- a/solution.ipynb +++ b/solution.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "0e557335", + "id": "eeffd47d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "8667a26e", + "id": "d9b65175", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "e36eca8a", + "id": "025f7566", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ffea56f9", + "id": "7f4771f3", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "55cab99a", + "id": "9d60bf35", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1fe21cf9", + "id": "4acd91e9", "metadata": {}, "outputs": [], "source": [ @@ -102,35 +102,35 @@ }, { "cell_type": "markdown", - "id": "610b5cdb", + "id": "9e70adfb", "metadata": { "lines_to_next_cell": 0 }, "source": [ - "We have pre-traiend a classifier for you on this dataset. It is the same architecture classifier as you used in the Failure Modes exercise: a `DenseModel`.\n", + "During the setup you have pre-traiend a classifier on this dataset. It is the same architecture classifier as you used in the Failure Modes exercise: a `DenseModel`.\n", "Let's load that classifier now!" ] }, { "cell_type": "markdown", - "id": "275b9fc0", + "id": "aa1a58cf", "metadata": { "lines_to_next_cell": 0 }, "source": [ "

    Task 1.1: Load the classifier

    \n", - "We have written a slightly more general version of the `DenseModel` that you used in the previous exercise. Ours requires two inputs:\n", - "- `input_shape`: the shape of the input images, as a tuple\n", - "- `num_classes`: the number of classes in the dataset\n", + "We have written a slightly more general version of the DenseModel that you used in the previous exercise. Ours requires two inputs:\n", + "
  • input_shape: the shape of the input images, as a tuple
  • \n", + "
  • num_classes: the number of classes in the dataset
  • \n", "\n", "Create a dense model with the right inputs and load the weights from the checkpoint.\n", - "
    " + "
    " ] }, { "cell_type": "code", "execution_count": null, - "id": "493d2b7b", + "id": "95d9fa51", "metadata": { "tags": [ "solution" @@ -154,7 +154,7 @@ }, { "cell_type": "markdown", - "id": "3f2739bc", + "id": "a3326bfe", "metadata": { "lines_to_next_cell": 0 }, @@ -165,7 +165,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9423e54b", + "id": "19354115", "metadata": {}, "outputs": [], "source": [ @@ -192,7 +192,7 @@ }, { "cell_type": "markdown", - "id": "cecb6c11", + "id": "ff293353", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -202,7 +202,7 @@ }, { "cell_type": "markdown", - "id": "c57a8a9d", + "id": "c8f3e86d", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -215,7 +215,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a596affd", + "id": "c6071dca", "metadata": { "tags": [] }, @@ -233,7 +233,7 @@ }, { "cell_type": "markdown", - "id": "06db8b81", + "id": "7be4fc86", "metadata": { "tags": [] }, @@ -249,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "78463dad", + "id": "e5f2cecc", "metadata": { "tags": [ "solution" @@ -273,7 +273,7 @@ { "cell_type": "code", "execution_count": null, - "id": "115b94e7", + "id": "5395d4a2", "metadata": { "tags": [] }, @@ -286,7 +286,7 @@ }, { "cell_type": "markdown", - "id": "6e281067", + "id": "f75d2492", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -298,7 +298,7 @@ { "cell_type": "code", "execution_count": null, - "id": "07528ee6", + "id": "9d788363", "metadata": { "tags": [] }, @@ -326,7 +326,7 @@ { "cell_type": "code", "execution_count": null, - "id": "780d14de", + "id": "56d5e9f3", "metadata": { "tags": [] }, @@ -339,13 +339,13 @@ }, { "cell_type": "markdown", - "id": "33d9db66", + "id": "4c28aff0", "metadata": { "lines_to_next_cell": 2 }, "source": [ "\n", - "The attributions are shown as a heatmap. The brighter the pixel, the more important this attribution method thinks that it is.\n", + "The attributions are shown as a heatmap. The closer to 1 the pixel value, the more important this attribution method thinks that it is.\n", "As you can see, it is pretty good at recognizing the number within the image.\n", "As we know, however, it is not the digit itself that is important for the classification, it is the color!\n", "Although the method is picking up really well on the region of interest, it would be difficult to conclude from this that it is the color that matters." @@ -353,7 +353,7 @@ }, { "cell_type": "markdown", - "id": "81725a7f", + "id": "e871c574", "metadata": { "lines_to_next_cell": 0 }, @@ -366,7 +366,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cba802d4", + "id": "9bd13121", "metadata": {}, "outputs": [], "source": [ @@ -391,13 +391,13 @@ }, { "cell_type": "markdown", - "id": "7254eab3", + "id": "e302a5ca", "metadata": { "lines_to_next_cell": 0 }, "source": [ "We get some better clues when looking at the attributions in color.\n", - "The highlighting doesn't just happen in the region with number, but also seems to hapen in a channel that matches the color of the image.\n", + "The highlighting doesn't just happen in the region with number, but also seems to happen in a channel that matches the color of the image.\n", "Just based on this, however, we don't get much more information than we got from the images themselves.\n", "\n", "If we didn't know in advance, it is unclear whether the color or the number is the most important feature for the classifier." 
@@ -405,7 +405,7 @@ }, { "cell_type": "markdown", - "id": "2fd14add", + "id": "844f37f0", "metadata": {}, "source": [ "\n", @@ -431,7 +431,7 @@ }, { "cell_type": "markdown", - "id": "9e88a188", + "id": "d6e8e759", "metadata": {}, "source": [ "

    Task 2.3: Use random noise as a baseline

    \n", @@ -443,7 +443,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3823dfba", + "id": "7135e83d", "metadata": { "tags": [ "solution" @@ -460,16 +460,16 @@ "attributions_random = integrated_gradients.attribute(\n", " x, target=y, baselines=random_baselines\n", ")\n", - "\n", + "attributions_random = attributions_random.cpu().numpy()\n", "# Plotting\n", - "for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()):\n", + "for attr, im, lbl in zip(attributions_random, x.cpu().numpy(), y.cpu().numpy()):\n", " print(f\"Class {lbl}\")\n", " visualize_color_attribution(attr, im)" ] }, { "cell_type": "markdown", - "id": "8d7fbe0e", + "id": "6d560996", "metadata": { "tags": [] }, @@ -483,7 +483,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22c8c0fe", + "id": "6920e975", "metadata": { "tags": [ "solution" @@ -503,15 +503,17 @@ " x, target=y, baselines=blurred_baselines\n", ")\n", "\n", + "attributions_blurred = attributions_blurred.cpu().numpy()\n", + "\n", "# Plotting\n", - "for attr, im, lbl in zip(attributions, x.cpu().numpy(), y.cpu().numpy()):\n", + "for attr, im, lbl in zip(attributions_blurred, x.cpu().numpy(), y.cpu().numpy()):\n", " print(f\"Class {lbl}\")\n", " visualize_color_attribution(attr, im)" ] }, { "cell_type": "markdown", - "id": "220cc456", + "id": "de810272", "metadata": { "tags": [] }, @@ -527,7 +529,7 @@ }, { "cell_type": "markdown", - "id": "83bbf9b4", + "id": "7a6f89dc", "metadata": {}, "source": [ "

    BONUS Task: Using different attributions.

    \n", @@ -541,11 +543,11 @@ }, { "cell_type": "markdown", - "id": "69d66239", + "id": "f3e293c5", "metadata": {}, "source": [ "

    Checkpoint 2

    \n", - "Let us know on the exercise chat when you've reached this point!\n", + "Put up your green sticky note when you've reached this point!\n", "\n", "At this point we have:\n", "\n", @@ -561,7 +563,7 @@ }, { "cell_type": "markdown", - "id": "0d20d9cf", + "id": "fb3048e1", "metadata": { "lines_to_next_cell": 0 }, @@ -574,9 +576,9 @@ "**What is a counterfactual?**\n", "\n", "You've learned about adversarial examples in the lecture on failure modes. These are the imperceptible or noisy changes to an image that drastically changes a classifier's opinion.\n", - "Counterfactual explanations are the useful cousins of adversarial examples. They are *perceptible* and *informative* changes to an image that changes a classifier's opinion.\n", + "Counterfactual explanations are the useful cousins of adversarial examples. They are *perceptible* and *informative* changes to an image that change a classifier's opinion.\n", "\n", - "In the image below you can see the difference between the two. In the first column are MNIST images along with their classifictaions, and in the second column are counterfactual explanations to *change* that class. You can see that in both cases a human being would (hopefully) agree with the new classification. By comparing the two columns, we can therefore begin to define what makes each digit special.\n", + "In the image below you can see the difference between the two. In the first column are (non-color) MNIST images along with their classifications, and in the second column are counterfactual explanations to *change* that class. You can see that in both cases a human being would (hopefully) agree with the new classification. By comparing the two columns, we can therefore begin to define what makes each digit special.\n", "\n", "In contrast, the third and fourth columns show an MNIST image and a corresponding adversarial example. Here the network returns a prediction that most human beings (who aren't being facetious) would strongly disagree with.\n", "\n", @@ -589,7 +591,7 @@ }, { "cell_type": "markdown", - "id": "18cda8e3", + "id": "88f77592", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -612,7 +614,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a4ea7f03", + "id": "6742eca7", "metadata": {}, "outputs": [], "source": [ @@ -644,7 +646,7 @@ }, { "cell_type": "markdown", - "id": "53e15fe2", + "id": "3ed703e3", "metadata": { "lines_to_next_cell": 0 }, @@ -659,7 +661,7 @@ { "cell_type": "code", "execution_count": null, - "id": "33e1c7fa", + "id": "981023f6", "metadata": { "tags": [ "solution" @@ -676,14 +678,14 @@ }, { "cell_type": "markdown", - "id": "49a9a89d", + "id": "fdf97172", "metadata": { "tags": [] }, "source": [ "

    Hyper-parameter choices

    \n", "
      \n", - "
    • Are any of the hyperparameters you choose above constrained in some way?
    • \n", + "
    • Are any of the hyperparameters above constrained in some way?
    • \n", "
    • What would happen if you chose a depth of 10 for the UNet?
    • \n", "
    • Is there a minimum size for the style space? Why or why not?
    • \n", "
    " @@ -691,7 +693,7 @@ }, { "cell_type": "markdown", - "id": "fb9b4a59", + "id": "bcb08a78", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -708,7 +710,7 @@ { "cell_type": "code", "execution_count": null, - "id": "326f1c50", + "id": "fa38c00f", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -722,7 +724,7 @@ }, { "cell_type": "markdown", - "id": "ef8dd1e5", + "id": "fd541400", "metadata": { "lines_to_next_cell": 0 }, @@ -733,7 +735,7 @@ { "cell_type": "code", "execution_count": null, - "id": "069f2804", + "id": "1e91e416", "metadata": {}, "outputs": [], "source": [ @@ -743,7 +745,7 @@ }, { "cell_type": "markdown", - "id": "380256c9", + "id": "b0377825", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -761,7 +763,7 @@ { "cell_type": "code", "execution_count": null, - "id": "576200cc", + "id": "f4939a8d", "metadata": { "lines_to_next_cell": 0 }, @@ -773,7 +775,7 @@ }, { "cell_type": "markdown", - "id": "2885a3a0", + "id": "9a6ca642", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -792,7 +794,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9cc5eb23", + "id": "382fa57e", "metadata": {}, "outputs": [], "source": [ @@ -801,7 +803,7 @@ }, { "cell_type": "markdown", - "id": "5eb04071", + "id": "ccf3b275", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -817,7 +819,7 @@ { "cell_type": "code", "execution_count": null, - "id": "805dfbf2", + "id": "0a424199", "metadata": {}, "outputs": [], "source": [ @@ -826,7 +828,7 @@ }, { "cell_type": "markdown", - "id": "8fab1a3e", + "id": "fe335f99", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -838,7 +840,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dc899114", + "id": "977ce4c8", "metadata": {}, "outputs": [], "source": [ @@ -851,7 +853,7 @@ }, { "cell_type": "markdown", - "id": "7e13436e", + "id": "7bf8cb24", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -865,7 +867,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1cb60fb1", + "id": "0d5ee1e1", "metadata": {}, "outputs": [], "source": [ @@ -877,7 +879,7 @@ }, { "cell_type": "markdown", - "id": "e8441eda", + "id": "43c93243", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -897,7 +899,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6bb72378", + "id": "9a789ba4", "metadata": {}, "outputs": [], "source": [ @@ -921,7 +923,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af5d018a", + "id": "91b15866", "metadata": {}, "outputs": [], "source": [ @@ -931,7 +933,26 @@ }, { "cell_type": "markdown", - "id": "97aaeef6", + "id": "87d5a8a1", + "metadata": {}, + "source": [ + "

    Checkpoint 3

    \n", + "Put up your green sticky note when you've reached this point!\n", + "\n", + "At this point we have:\n", + "\n", + "- Loaded a classifier that classifies MNIST-like images by color, but we don't know how!\n", + "- Tried applying Integrated Gradients to find out what the classifier is looking at - with little success.\n", + "- Discovered the effect of changing the baseline on the output of integrated gradients.\n", + "- Defined the hyperparameters for a StarGAN to create counterfactual images.\n", + "\n", + "Next up, we will define the training loop for the StarGAN.\n", + "
    " + ] + }, + { + "cell_type": "markdown", + "id": "3a1e7dd9", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -953,7 +974,7 @@ }, { "cell_type": "markdown", - "id": "74570a78", + "id": "c6348be7", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -965,7 +986,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e53b9f64", + "id": "e67449b2", "metadata": { "lines_to_next_cell": 2, "tags": [ @@ -1035,7 +1056,7 @@ }, { "cell_type": "markdown", - "id": "41e0e738", + "id": "f6cd135b", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1047,7 +1068,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b49fd330", + "id": "cdecbbbe", "metadata": {}, "outputs": [], "source": [ @@ -1063,7 +1084,7 @@ }, { "cell_type": "markdown", - "id": "77d4f778", + "id": "2c7d13e4", "metadata": { "tags": [] }, @@ -1078,7 +1099,7 @@ }, { "cell_type": "markdown", - "id": "2258df98", + "id": "689db81d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1090,7 +1111,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fa248f8b", + "id": "c3f514fa", "metadata": {}, "outputs": [], "source": [ @@ -1112,7 +1133,7 @@ }, { "cell_type": "markdown", - "id": "c2db79f3", + "id": "27ee28e5", "metadata": { "tags": [] }, @@ -1122,13 +1143,13 @@ "The same method can be used to create a StarGAN with different basic elements.\n", "For example, you can change the archictecture of the generators, or of the discriminator to better fit your data in the future.\n", "\n", - "You know the drill... let us know on the exercise chat when you have arrived here!\n", + "You know the drill... put up your green sticky note when you have arrived here!\n", "
    " ] }, { "cell_type": "markdown", - "id": "de5cb982", + "id": "e1c592af", "metadata": { "tags": [] }, @@ -1138,7 +1159,7 @@ }, { "cell_type": "markdown", - "id": "ac566b2e", + "id": "d2374be1", "metadata": { "tags": [] }, @@ -1155,7 +1176,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af5bbb66", + "id": "c038ee90", "metadata": { "title": "Loading the test dataset" }, @@ -1175,7 +1196,7 @@ }, { "cell_type": "markdown", - "id": "e5392251", + "id": "2dd7e283", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1187,7 +1208,7 @@ { "cell_type": "code", "execution_count": null, - "id": "02808bdf", + "id": "1910ed0f", "metadata": {}, "outputs": [], "source": [ @@ -1200,7 +1221,7 @@ }, { "cell_type": "markdown", - "id": "e2e7f289", + "id": "99188e3e", "metadata": { "lines_to_next_cell": 0 }, @@ -1210,7 +1231,7 @@ }, { "cell_type": "markdown", - "id": "9b936567", + "id": "fe2b4929", "metadata": { "lines_to_next_cell": 0 }, @@ -1228,7 +1249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "caa3e4ec", + "id": "99768fc9", "metadata": { "tags": [ "solution" @@ -1265,7 +1286,7 @@ }, { "cell_type": "markdown", - "id": "35dd9913", + "id": "ef56e941", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1277,7 +1298,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13fa9bfa", + "id": "1612f8ce", "metadata": {}, "outputs": [], "source": [ @@ -1290,7 +1311,7 @@ }, { "cell_type": "markdown", - "id": "449428b9", + "id": "2239beb9", "metadata": { "tags": [] }, @@ -1305,7 +1326,7 @@ }, { "cell_type": "markdown", - "id": "cb3d6985", + "id": "168352ff", "metadata": { "tags": [] }, @@ -1316,7 +1337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3096db55", + "id": "f24cdc48", "metadata": {}, "outputs": [], "source": [ @@ -1330,7 +1351,7 @@ }, { "cell_type": "markdown", - "id": "19fd90a6", + "id": "b093ffc8", "metadata": { "tags": [] }, @@ -1345,7 +1366,7 @@ }, { "cell_type": "markdown", - "id": "e0967ec3", + "id": "be5c5a98", "metadata": { "lines_to_next_cell": 0 }, @@ -1360,7 +1381,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b3312425", + "id": "0891fcf7", "metadata": {}, "outputs": [], "source": [ @@ -1381,7 +1402,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42c617e2", + "id": "b080dfe1", "metadata": { "title": "Another visualization function" }, @@ -1410,7 +1431,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3075be99", + "id": "e41e00c9", "metadata": { "lines_to_next_cell": 0 }, @@ -1426,7 +1447,7 @@ }, { "cell_type": "markdown", - "id": "d70f3ff4", + "id": "ac7678a7", "metadata": { "lines_to_next_cell": 0 }, @@ -1442,7 +1463,7 @@ }, { "cell_type": "markdown", - "id": "593ba2db", + "id": "3e06986c", "metadata": { "lines_to_next_cell": 0 }, @@ -1457,7 +1478,7 @@ }, { "cell_type": "markdown", - "id": "863c8dca", + "id": "7c9a99c4", "metadata": { "lines_to_next_cell": 0 }, @@ -1471,7 +1492,7 @@ { "cell_type": "code", "execution_count": null, - "id": "602db341", + "id": "b1affa61", "metadata": { "lines_to_next_cell": 0 }, @@ -1491,7 +1512,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cf1e78db", + "id": "4bf0e982", "metadata": { "lines_to_next_cell": 0 }, @@ -1527,7 +1548,7 @@ }, { "cell_type": "markdown", - "id": "6e37fd94", + "id": "7f8f6ca5", "metadata": { "lines_to_next_cell": 0 }, @@ -1541,7 +1562,7 @@ }, { "cell_type": "markdown", - "id": "5b55e69e", + "id": "537edaa0", "metadata": { "lines_to_next_cell": 0 }, @@ -1557,7 +1578,7 @@ }, { "cell_type": "markdown", - "id": "3862ab46", + 
"id": "c4dd727d", "metadata": { "lines_to_next_cell": 0 }, @@ -1573,7 +1594,7 @@ }, { "cell_type": "markdown", - "id": "bedeaf5c", + "id": "c5a5e2c0", "metadata": { "lines_to_next_cell": 0 }, @@ -1596,7 +1617,7 @@ }, { "cell_type": "markdown", - "id": "75e28b2c", + "id": "a9ede52b", "metadata": {}, "source": [ "

    Task 5.1: Explore the style space

    \n", @@ -1608,7 +1629,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d2ffe8e5", + "id": "af4a9d95", "metadata": {}, "outputs": [], "source": [ @@ -1643,7 +1664,7 @@ }, { "cell_type": "markdown", - "id": "559a66a2", + "id": "be742cb1", "metadata": { "lines_to_next_cell": 0 }, @@ -1659,7 +1680,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4c268ad8", + "id": "09032a46", "metadata": { "lines_to_next_cell": 0 }, @@ -1686,7 +1707,7 @@ }, { "cell_type": "markdown", - "id": "7e9d9184", + "id": "4d9a7277", "metadata": { "lines_to_next_cell": 0 }, @@ -1700,7 +1721,7 @@ }, { "cell_type": "markdown", - "id": "45005d21", + "id": "3016bf5c", "metadata": { "lines_to_next_cell": 0 }, @@ -1717,7 +1738,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b6e19ec8", + "id": "b47bc9a2", "metadata": {}, "outputs": [], "source": [ @@ -1740,7 +1761,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7aba6e43", + "id": "5434a70a", "metadata": { "lines_to_next_cell": 0 }, @@ -1749,7 +1770,7 @@ }, { "cell_type": "markdown", - "id": "c90115e2", + "id": "9d066df4", "metadata": {}, "source": [ "

    Questions

    \n", @@ -1761,7 +1782,7 @@ }, { "cell_type": "markdown", - "id": "6dd82862", + "id": "e4295029", "metadata": {}, "source": [ "

    Checkpoint 5

    \n", @@ -1779,7 +1800,7 @@ }, { "cell_type": "markdown", - "id": "b6389689", + "id": "3cd7976f", "metadata": { "lines_to_next_cell": 0 }, @@ -1801,7 +1822,7 @@ { "cell_type": "code", "execution_count": null, - "id": "764a648d", + "id": "ffeee5b2", "metadata": { "tags": [ "solution" From 14e66a9964fe2fb0ce0f4d9c842855a2887e8873 Mon Sep 17 00:00:00 2001 From: Larissa Heinrich Date: Mon, 26 Aug 2024 19:23:36 +0000 Subject: [PATCH 03/11] change some text and html stuff --- solution.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/solution.py b/solution.py index c85b685..2d5ed2d 100644 --- a/solution.py +++ b/solution.py @@ -395,8 +395,8 @@ def visualize_color_attribution(attribution, original_image): # # We will not be using the random latent code (green, in the figure), so the model we use is made up of three networks: # - The generator - this will be the bulk of the model, and will be responsible for transforming the images: we're going to use a `UNet` -# - The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel` # - The style encoder - this will be responsible for encoding the style of the image: we're going to use a `DenseModel` +# - The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel` # # Let's start by creating these! # %% @@ -579,10 +579,10 @@ def copy_parameters(source_model, target_model): # You were given several different options in the training code below. In each case, one of the options will work, and the other will not. # Comment out the option that you think will not work. #
      -#
-# • Choose the values for `set_requires_grad`. Hint: which part of the code is training the generator? Which part is training the discriminator
-# • Choose the values of `set_requires_grad`, again. Hint: you may want to switch
+# • Choose the values for set_requires_grad. Hint: which part of the code is training the generator? Which part is training the discriminator?
+# • Choose the values of set_requires_grad, again. Hint: you may want to switch
 # • Choose the sign of the discriminator loss. Hint: what does the discriminator want to do?
-# . • Apply the EMA update. Hint: which model do you want to update? You can look again at the code we wrote above.
+# • Apply the EMA update. Hint: which model do you want to update? You can look again at the code we wrote above.
 #
 # (A minimal `set_requires_grad` helper is sketched just below.)
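# A hedged sketch of the kind of helper the first two hints refer to; the actual
# `set_requires_grad` used in the training code may differ, so treat this as an illustration
# rather than the exercise's definition.

# %%
def set_requires_grad(module, value=True):
    """Enable or disable gradient computation for every parameter of `module`."""
    # Freezing one network (value=False) while the other one trains, and then swapping the
    # roles, is exactly the choice the hints above ask you to make.
    for param in module.parameters():
        param.requires_grad = value

# %% [markdown]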
    # Let's train the StarGAN one batch a time. # While you watch the model train, consider whether you think it will be successful at generating counterfactuals in the number of steps we give it. What is the minimum number of iterations you think are needed for this to work, and how much time do yo uthink it will take? From 3fd89af87305240da18978cc1154887d0822bbe6 Mon Sep 17 00:00:00 2001 From: Larissa Heinrich Date: Mon, 26 Aug 2024 20:15:11 +0000 Subject: [PATCH 04/11] torch inference mode and html changes --- solution.py | 70 ++++++++++++++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/solution.py b/solution.py index 2d5ed2d..1768933 100644 --- a/solution.py +++ b/solution.py @@ -849,20 +849,20 @@ def copy_parameters(source_model, target_model): predictions = [] source_labels = [] target_labels = [] - -for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images): - for lbl in range(4): - # TODO Create the counterfactual - x_fake = generator(x.unsqueeze(0).to(device), ...) - # TODO Predict the class of the counterfactual image - pred = model(...) - - # TODO Store the source and target labels - source_labels.append(...) # The original label of the image - target_labels.append(...) # The desired label of the counterfactual image - # Store the counterfactual image and prediction - counterfactuals[lbl][i] = x_fake.cpu().detach().numpy() - predictions.append(pred.argmax().item()) +with torch.inference_mode(): + for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images): + for lbl in range(4): + # TODO Create the counterfactual + x_fake = generator(x.unsqueeze(0).to(device), ...) + # TODO Predict the class of the counterfactual image + pred = model(...) + + # TODO Store the source and target labels + source_labels.append(...) # The original label of the image + target_labels.append(...) 
# The desired label of the counterfactual image + # Store the counterfactual image and prediction + counterfactuals[lbl][i] = x_fake.cpu().detach().numpy() + predictions.append(pred.argmax().item()) # %% tags=["solution"] num_images = 1000 random_test_mnist = torch.utils.data.Subset( @@ -873,22 +873,22 @@ def copy_parameters(source_model, target_model): predictions = [] source_labels = [] target_labels = [] - -for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images): - for lbl in range(4): - # Create the counterfactual - x_fake = generator( - x.unsqueeze(0).to(device), prototypes[lbl].unsqueeze(0).to(device) - ) - # Predict the class of the counterfactual image - pred = model(x_fake) - - # Store the source and target labels - source_labels.append(y) # The original label of the image - target_labels.append(lbl) # The desired label of the counterfactual image - # Store the counterfactual image and prediction - counterfactuals[lbl][i] = x_fake.cpu().detach().numpy() - predictions.append(pred.argmax().item()) +with torch.inference_mode(): + for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images): + for lbl in range(4): + # Create the counterfactual + x_fake = generator( + x.unsqueeze(0).to(device), prototypes[lbl].unsqueeze(0).to(device) + ) + # Predict the class of the counterfactual image + pred = model(x_fake) + + # Store the source and target labels + source_labels.append(y) # The original label of the image + target_labels.append(lbl) # The desired label of the counterfactual image + # Store the counterfactual image and prediction + counterfactuals[lbl][i] = x_fake.cpu().detach().numpy() + predictions.append(pred.argmax().item()) # %% [markdown] tags=[] # Let's plot the confusion matrix for the counterfactual images. @@ -1161,7 +1161,6 @@ def visualize_color_attribution_and_counterfactual( plt.legend() plt.show() -# %% # %% [markdown] #
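# One way to summarise the counterfactual loop above is a confusion matrix of the desired
# (target) class against the class the classifier actually predicted for each counterfactual.
# A hedged sketch, assuming scikit-learn is available in the exercise environment (the course
# material may provide its own plotting helper):

# %%
from sklearn.metrics import ConfusionMatrixDisplay, confusion_matrix

cm = confusion_matrix(target_labels, predictions, normalize="true")
ConfusionMatrixDisplay(cm).plot(cmap="Blues")
plt.show()

# %% [markdown]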

    Questions

    #
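    The markdown cell above announces the confusion matrix for the counterfactual images, but the plotting code itself sits outside this hunk. For reference only, such a matrix could be assembled roughly as follows. This is a hedged sketch rather than the notebook's own cell: it assumes scikit-learn is available, and `target_labels` and `predictions` are the lists filled by the generation loop above, which patch 04 now wraps in torch.inference_mode() so that no gradients are tracked while the counterfactuals are produced.

    import numpy as np
    import matplotlib.pyplot as plt
    from sklearn.metrics import confusion_matrix  # assumption: scikit-learn is installed

    def plot_counterfactual_confusion(target_labels, predictions, num_classes=4):
        # Rows: the class each counterfactual was steered towards (treated as "true" here).
        # Columns: the class the frozen classifier actually predicted for the fake image.
        cm = confusion_matrix(
            target_labels, predictions, labels=list(range(num_classes)), normalize="true"
        )
        fig, ax = plt.subplots()
        image = ax.imshow(cm, vmin=0, vmax=1)
        for (row, col), value in np.ndenumerate(cm):
            ax.text(col, row, f"{value:.2f}", ha="center", va="center")
        fig.colorbar(image, ax=ax)
        ax.set_ylabel("True")
        ax.set_xlabel("Predicted")
        plt.show()
    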
      @@ -1187,15 +1186,16 @@ def visualize_color_attribution_and_counterfactual( # If you have extra time, you can try to break the StarGAN! # There are a lot of little things that we did to make sure that it runs correctly - but what if we didn't? # Some things you might want to try: -# - What happens if you don't use the EMA model? -# - What happens if you change the learning rates? -# - What happens if you add a Sigmoid activation to the output of the style encoder? +#
    +# • What happens if you don't use the EMA model?
    +# • What happens if you change the learning rates?
    +# • What happens if you add a Sigmoid activation to the output of the style encoder?
    
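    If you want to try the last suggestion above, one hypothetical way to do it is to squash every style code into (0, 1) by wrapping the encoder with a Sigmoid. `style_encoder` is the DenseModel the notebook builds earlier; the wrapper name below is made up for illustration.

    import torch.nn as nn

    # Purely illustrative: this squashed encoder would stand in for style_encoder
    # when the Generator is constructed, so every style code lands in (0, 1).
    style_encoder_squashed = nn.Sequential(style_encoder, nn.Sigmoid())
    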
    • # See what else you can think of, and see how finnicky training a GAN can be! ## %% [markdown] tags=["solution"] # The colors for the classes are sampled from matplotlib colormaps! They are the four seasons: spring, summer, autumn, and winter. # Check your style space again to see if you can see the patterns now! -# %% tags=["solution"] + +## %% tags=["solution"] # Let's plot the colormaps import matplotlib as mpl import numpy as np From 51c8a80ff221fc4e1aa7dc6c020706eb4321a4d6 Mon Sep 17 00:00:00 2001 From: neptunes5thmoon Date: Mon, 26 Aug 2024 20:15:58 +0000 Subject: [PATCH 05/11] Commit from GitHub Actions (Build Notebooks) --- exercise.ipynb | 295 ++++++++++++++++++++++++++----------------------- solution.ipynb | 281 +++++++++++++++++++++------------------------- 2 files changed, 288 insertions(+), 288 deletions(-) diff --git a/exercise.ipynb b/exercise.ipynb index bde574d..e8f9a5e 100644 --- a/exercise.ipynb +++ b/exercise.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "eeffd47d", + "id": "c25b89a0", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "d9b65175", + "id": "b896f444", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "025f7566", + "id": "4633760c", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7f4771f3", + "id": "05b99695", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "9d60bf35", + "id": "b124e6c9", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4acd91e9", + "id": "ce4006c6", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "9e70adfb", + "id": "3d2f6db9", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "aa1a58cf", + "id": "42341879", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb0683a8", + "id": "d4f52196", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -155,7 +155,7 @@ }, { "cell_type": "markdown", - "id": "a3326bfe", + "id": "391c84f7", "metadata": { "lines_to_next_cell": 0 }, @@ -166,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19354115", + "id": "b06f0958", "metadata": {}, "outputs": [], "source": [ @@ -193,7 +193,7 @@ }, { "cell_type": "markdown", - "id": "ff293353", + "id": "9192b9c5", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -203,7 +203,7 @@ }, { "cell_type": "markdown", - "id": "c8f3e86d", + "id": "91671c93", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -216,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c6071dca", + "id": "b34f1344", "metadata": { "tags": [] }, @@ -234,7 +234,7 @@ }, { "cell_type": "markdown", - "id": "7be4fc86", + "id": "2c5a1bae", "metadata": { "tags": [] }, @@ -250,7 +250,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4766f414", + "id": "ac553834", "metadata": { "tags": [ "task" @@ -271,7 +271,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5395d4a2", + "id": "8b00e344", "metadata": { "tags": [] }, @@ -284,7 +284,7 @@ }, { "cell_type": "markdown", - "id": "f75d2492", + "id": "86339f4b", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -296,7 +296,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "9d788363", + "id": "9aaa3fbf", "metadata": { "tags": [] }, @@ -324,7 +324,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56d5e9f3", + "id": "cdee9934", "metadata": { "tags": [] }, @@ -337,7 +337,7 @@ }, { "cell_type": "markdown", - "id": "4c28aff0", + "id": "f1c23e88", "metadata": { "lines_to_next_cell": 2 }, @@ -351,7 +351,7 @@ }, { "cell_type": "markdown", - "id": "e871c574", + "id": "5de8d1dd", "metadata": { "lines_to_next_cell": 0 }, @@ -364,7 +364,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9bd13121", + "id": "1a885c54", "metadata": {}, "outputs": [], "source": [ @@ -389,7 +389,7 @@ }, { "cell_type": "markdown", - "id": "e302a5ca", + "id": "857094c0", "metadata": { "lines_to_next_cell": 0 }, @@ -403,7 +403,7 @@ }, { "cell_type": "markdown", - "id": "844f37f0", + "id": "b8b4541c", "metadata": {}, "source": [ "\n", @@ -429,7 +429,7 @@ }, { "cell_type": "markdown", - "id": "d6e8e759", + "id": "bae82f1c", "metadata": {}, "source": [ "

      Task 2.3: Use random noise as a baseline

      \n", @@ -441,7 +441,7 @@ { "cell_type": "code", "execution_count": null, - "id": "74344ee8", + "id": "3c0bdb51", "metadata": { "tags": [ "task" @@ -463,7 +463,7 @@ }, { "cell_type": "markdown", - "id": "6d560996", + "id": "a2118766", "metadata": { "tags": [] }, @@ -477,7 +477,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e15c02f7", + "id": "51f7943a", "metadata": { "tags": [ "task" @@ -501,7 +501,7 @@ }, { "cell_type": "markdown", - "id": "de810272", + "id": "23696f85", "metadata": { "tags": [] }, @@ -517,7 +517,7 @@ }, { "cell_type": "markdown", - "id": "7a6f89dc", + "id": "9e645510", "metadata": {}, "source": [ "

      BONUS Task: Using different attributions.

      \n", @@ -531,7 +531,7 @@ }, { "cell_type": "markdown", - "id": "f3e293c5", + "id": "86ccf43a", "metadata": {}, "source": [ "

      Checkpoint 2

      \n", @@ -551,7 +551,7 @@ }, { "cell_type": "markdown", - "id": "fb3048e1", + "id": "a2c90390", "metadata": { "lines_to_next_cell": 0 }, @@ -579,7 +579,7 @@ }, { "cell_type": "markdown", - "id": "88f77592", + "id": "0a3878d0", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -593,8 +593,8 @@ "\n", "We will not be using the random latent code (green, in the figure), so the model we use is made up of three networks:\n", "- The generator - this will be the bulk of the model, and will be responsible for transforming the images: we're going to use a `UNet`\n", - "- The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel`\n", "- The style encoder - this will be responsible for encoding the style of the image: we're going to use a `DenseModel`\n", + "- The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel`\n", "\n", "Let's start by creating these!" ] @@ -602,7 +602,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6742eca7", + "id": "57993293", "metadata": {}, "outputs": [], "source": [ @@ -634,7 +634,7 @@ }, { "cell_type": "markdown", - "id": "3ed703e3", + "id": "4156e52f", "metadata": { "lines_to_next_cell": 0 }, @@ -649,7 +649,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57a7d702", + "id": "02bc6f5a", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -670,7 +670,7 @@ }, { "cell_type": "markdown", - "id": "fdf97172", + "id": "157db656", "metadata": { "tags": [] }, @@ -685,7 +685,7 @@ }, { "cell_type": "markdown", - "id": "bcb08a78", + "id": "1205a0f7", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -702,7 +702,7 @@ { "cell_type": "code", "execution_count": null, - "id": "349544f2", + "id": "9f3464a7", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -716,7 +716,7 @@ }, { "cell_type": "markdown", - "id": "fd541400", + "id": "ca1cd81b", "metadata": { "lines_to_next_cell": 0 }, @@ -727,7 +727,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1e91e416", + "id": "6188d433", "metadata": {}, "outputs": [], "source": [ @@ -737,7 +737,7 @@ }, { "cell_type": "markdown", - "id": "b0377825", + "id": "88d9c1c5", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -755,7 +755,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f4939a8d", + "id": "89ced942", "metadata": { "lines_to_next_cell": 0 }, @@ -767,7 +767,7 @@ }, { "cell_type": "markdown", - "id": "9a6ca642", + "id": "5cf0dc3f", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -786,7 +786,7 @@ { "cell_type": "code", "execution_count": null, - "id": "382fa57e", + "id": "ce0263c7", "metadata": {}, "outputs": [], "source": [ @@ -795,7 +795,7 @@ }, { "cell_type": "markdown", - "id": "ccf3b275", + "id": "51907ad5", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -811,7 +811,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0a424199", + "id": "f711c045", "metadata": {}, "outputs": [], "source": [ @@ -820,7 +820,7 @@ }, { "cell_type": "markdown", - "id": "fe335f99", + "id": "2aff857d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -832,7 +832,7 @@ { "cell_type": "code", "execution_count": null, - "id": "977ce4c8", + "id": "bf789cc4", "metadata": {}, "outputs": [], "source": [ @@ -845,7 +845,7 @@ }, { "cell_type": "markdown", - "id": "7bf8cb24", + "id": "1813a71a", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -859,7 +859,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0d5ee1e1", + 
"id": "fb1b2b51", "metadata": {}, "outputs": [], "source": [ @@ -871,7 +871,7 @@ }, { "cell_type": "markdown", - "id": "43c93243", + "id": "af26f4ca", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -891,7 +891,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9a789ba4", + "id": "9a90d3ad", "metadata": {}, "outputs": [], "source": [ @@ -915,7 +915,7 @@ { "cell_type": "code", "execution_count": null, - "id": "91b15866", + "id": "9b109750", "metadata": {}, "outputs": [], "source": [ @@ -925,7 +925,7 @@ }, { "cell_type": "markdown", - "id": "87d5a8a1", + "id": "6e9159d6", "metadata": {}, "source": [ "

      Checkpoint 3

      \n", @@ -944,7 +944,7 @@ }, { "cell_type": "markdown", - "id": "3a1e7dd9", + "id": "d84d7806", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -954,10 +954,10 @@ "You were given several different options in the training code below. In each case, one of the options will work, and the other will not.\n", "Comment out the option that you think will not work.\n", "
        \n", - "
      • Choose the values for `set_requires_grad`. Hint: which part of the code is training the generator? Which part is training the discriminator
      • \n", - "
      • Choose the values of `set_requires_grad`, again. Hint: you may want to switch
      • \n", + "
      • Choose the values for set_requires_grad. Hint: which part of the code is training the generator? Which part is training the discriminator
      • \n", + "
      • Choose the values of set_requires_grad, again. Hint: you may want to switch
      • \n", "
      • Choose the sign of the discriminator loss. Hint: what does the discriminator want to do?
      • \n", - ".
      • Apply the EMA update. Hint: which model do you want to update? You can look again at the code we wrote above.
      • \n", + "
      • Apply the EMA update. Hint: which model do you want to update? You can look again at the code we wrote above.
      • \n", "
      \n", "Let's train the StarGAN one batch a time.\n", "While you watch the model train, consider whether you think it will be successful at generating counterfactuals in the number of steps we give it. What is the minimum number of iterations you think are needed for this to work, and how much time do yo uthink it will take?\n", @@ -966,7 +966,7 @@ }, { "cell_type": "markdown", - "id": "c6348be7", + "id": "d5a5a4c4", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -978,7 +978,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f5ed4de5", + "id": "1990acdf", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1089,7 +1089,7 @@ }, { "cell_type": "markdown", - "id": "f6cd135b", + "id": "5aba6a7f", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1101,7 +1101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cdecbbbe", + "id": "537f4e6b", "metadata": {}, "outputs": [], "source": [ @@ -1117,7 +1117,7 @@ }, { "cell_type": "markdown", - "id": "2c7d13e4", + "id": "605bd7ae", "metadata": { "tags": [] }, @@ -1132,7 +1132,7 @@ }, { "cell_type": "markdown", - "id": "689db81d", + "id": "fe825a06", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1144,7 +1144,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c3f514fa", + "id": "4a02eb13", "metadata": {}, "outputs": [], "source": [ @@ -1166,7 +1166,7 @@ }, { "cell_type": "markdown", - "id": "27ee28e5", + "id": "622f387c", "metadata": { "tags": [] }, @@ -1182,7 +1182,7 @@ }, { "cell_type": "markdown", - "id": "e1c592af", + "id": "d51edfbb", "metadata": { "tags": [] }, @@ -1192,7 +1192,7 @@ }, { "cell_type": "markdown", - "id": "d2374be1", + "id": "e62c728e", "metadata": { "tags": [] }, @@ -1209,7 +1209,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c038ee90", + "id": "7cbb0b43", "metadata": { "title": "Loading the test dataset" }, @@ -1229,7 +1229,7 @@ }, { "cell_type": "markdown", - "id": "2dd7e283", + "id": "ea8fd639", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1241,7 +1241,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1910ed0f", + "id": "b2e876d1", "metadata": {}, "outputs": [], "source": [ @@ -1254,7 +1254,7 @@ }, { "cell_type": "markdown", - "id": "99188e3e", + "id": "c34f6d79", "metadata": { "lines_to_next_cell": 0 }, @@ -1264,7 +1264,7 @@ }, { "cell_type": "markdown", - "id": "fe2b4929", + "id": "4d27e43c", "metadata": { "lines_to_next_cell": 0 }, @@ -1282,7 +1282,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30cd9209", + "id": "78d51072", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1300,25 +1300,25 @@ "predictions = []\n", "source_labels = []\n", "target_labels = []\n", + "with torch.inference_mode():\n", + " for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images):\n", + " for lbl in range(4):\n", + " # TODO Create the counterfactual\n", + " x_fake = generator(x.unsqueeze(0).to(device), ...)\n", + " # TODO Predict the class of the counterfactual image\n", + " pred = model(...)\n", "\n", - "for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images):\n", - " for lbl in range(4):\n", - " # TODO Create the counterfactual\n", - " x_fake = generator(x.unsqueeze(0).to(device), ...)\n", - " # TODO Predict the class of the counterfactual image\n", - " pred = model(...)\n", - "\n", - " # TODO Store the source and target labels\n", - " source_labels.append(...) # The original label of the image\n", - " target_labels.append(...) 
# The desired label of the counterfactual image\n", - " # Store the counterfactual image and prediction\n", - " counterfactuals[lbl][i] = x_fake.cpu().detach().numpy()\n", - " predictions.append(pred.argmax().item())" + " # TODO Store the source and target labels\n", + " source_labels.append(...) # The original label of the image\n", + " target_labels.append(...) # The desired label of the counterfactual image\n", + " # Store the counterfactual image and prediction\n", + " counterfactuals[lbl][i] = x_fake.cpu().detach().numpy()\n", + " predictions.append(pred.argmax().item())" ] }, { "cell_type": "markdown", - "id": "ef56e941", + "id": "20059d76", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1330,7 +1330,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1612f8ce", + "id": "de15b0fe", "metadata": {}, "outputs": [], "source": [ @@ -1343,7 +1343,7 @@ }, { "cell_type": "markdown", - "id": "2239beb9", + "id": "07962ea2", "metadata": { "tags": [] }, @@ -1358,7 +1358,7 @@ }, { "cell_type": "markdown", - "id": "168352ff", + "id": "1590ce72", "metadata": { "tags": [] }, @@ -1369,7 +1369,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f24cdc48", + "id": "ccaee798", "metadata": {}, "outputs": [], "source": [ @@ -1383,7 +1383,7 @@ }, { "cell_type": "markdown", - "id": "b093ffc8", + "id": "e309a547", "metadata": { "tags": [] }, @@ -1398,7 +1398,7 @@ }, { "cell_type": "markdown", - "id": "be5c5a98", + "id": "11aa4518", "metadata": { "lines_to_next_cell": 0 }, @@ -1413,7 +1413,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0891fcf7", + "id": "6d87fada", "metadata": {}, "outputs": [], "source": [ @@ -1434,7 +1434,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b080dfe1", + "id": "1ea7e8d5", "metadata": { "title": "Another visualization function" }, @@ -1463,7 +1463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e41e00c9", + "id": "21f1fef5", "metadata": { "lines_to_next_cell": 0 }, @@ -1479,7 +1479,7 @@ }, { "cell_type": "markdown", - "id": "ac7678a7", + "id": "2b579f39", "metadata": { "lines_to_next_cell": 0 }, @@ -1495,7 +1495,7 @@ }, { "cell_type": "markdown", - "id": "3e06986c", + "id": "f623afc1", "metadata": { "lines_to_next_cell": 0 }, @@ -1510,7 +1510,7 @@ }, { "cell_type": "markdown", - "id": "7c9a99c4", + "id": "da16de09", "metadata": { "lines_to_next_cell": 0 }, @@ -1524,7 +1524,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b1affa61", + "id": "80e4a662", "metadata": { "lines_to_next_cell": 0 }, @@ -1544,7 +1544,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4bf0e982", + "id": "54fdb6f2", "metadata": { "lines_to_next_cell": 0 }, @@ -1580,7 +1580,7 @@ }, { "cell_type": "markdown", - "id": "7f8f6ca5", + "id": "a0adcd59", "metadata": { "lines_to_next_cell": 0 }, @@ -1594,7 +1594,7 @@ }, { "cell_type": "markdown", - "id": "537edaa0", + "id": "37d2cfdb", "metadata": { "lines_to_next_cell": 0 }, @@ -1610,7 +1610,7 @@ }, { "cell_type": "markdown", - "id": "c4dd727d", + "id": "74fdb1ef", "metadata": { "lines_to_next_cell": 0 }, @@ -1626,7 +1626,7 @@ }, { "cell_type": "markdown", - "id": "c5a5e2c0", + "id": "b6260bf7", "metadata": { "lines_to_next_cell": 0 }, @@ -1649,7 +1649,7 @@ }, { "cell_type": "markdown", - "id": "a9ede52b", + "id": "5d15f240", "metadata": {}, "source": [ "

      Task 5.1: Explore the style space

      \n", @@ -1661,7 +1661,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af4a9d95", + "id": "0d7f3699", "metadata": {}, "outputs": [], "source": [ @@ -1696,7 +1696,7 @@ }, { "cell_type": "markdown", - "id": "be742cb1", + "id": "10c742dc", "metadata": { "lines_to_next_cell": 0 }, @@ -1712,7 +1712,7 @@ { "cell_type": "code", "execution_count": null, - "id": "09032a46", + "id": "aeb2b7bf", "metadata": { "lines_to_next_cell": 0 }, @@ -1739,7 +1739,7 @@ }, { "cell_type": "markdown", - "id": "4d9a7277", + "id": "4305d760", "metadata": { "lines_to_next_cell": 0 }, @@ -1753,7 +1753,7 @@ }, { "cell_type": "markdown", - "id": "3016bf5c", + "id": "68c4a8e1", "metadata": { "lines_to_next_cell": 0 }, @@ -1770,7 +1770,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b47bc9a2", + "id": "a13a8561", "metadata": {}, "outputs": [], "source": [ @@ -1790,19 +1790,9 @@ "plt.show()" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "5434a70a", - "metadata": { - "lines_to_next_cell": 0 - }, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", - "id": "9d066df4", + "id": "efd736b5", "metadata": {}, "source": [ "

      Questions

      \n", @@ -1814,7 +1804,7 @@ }, { "cell_type": "markdown", - "id": "e4295029", + "id": "9d4d9d16", "metadata": {}, "source": [ "

      Checkpoint 5

      \n", @@ -1832,23 +1822,56 @@ }, { "cell_type": "markdown", - "id": "3cd7976f", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "ec8f2497", + "metadata": {}, "source": [ "# Bonus!\n", "If you have extra time, you can try to break the StarGAN!\n", "There are a lot of little things that we did to make sure that it runs correctly - but what if we didn't?\n", "Some things you might want to try:\n", - "- What happens if you don't use the EMA model?\n", - "- What happens if you change the learning rates?\n", - "- What happens if you add a Sigmoid activation to the output of the style encoder?\n", + "
    • What happens if you don't use the EMA model?\n",
    • What happens if you change the learning rates?\n",
    • What happens if you add a Sigmoid activation to the output of the style encoder?
    
    • \n", "See what else you can think of, and see how finnicky training a GAN can be!\n", "\n", "# %% [markdown] tags=[\"solution\"]\n", "The colors for the classes are sampled from matplotlib colormaps! They are the four seasons: spring, summer, autumn, and winter.\n", - "Check your style space again to see if you can see the patterns now!" + "Check your style space again to see if you can see the patterns now!\n", + "\n", + "# %% tags=[\"solution\"]\n", + "Let's plot the colormaps\n", + "import matplotlib as mpl\n", + "import numpy as np\n", + "\n", + "\n", + "def plot_color_gradients(cmap_list):\n", + " gradient = np.linspace(0, 1, 256)\n", + " gradient = np.vstack((gradient, gradient))\n", + "\n", + " # Create figure and adjust figure height to number of colormaps\n", + " nrows = len(cmap_list)\n", + " figh = 0.35 + 0.15 + (nrows + (nrows - 1) * 0.1) * 0.22\n", + " fig, axs = plt.subplots(nrows=nrows + 1, figsize=(6.4, figh))\n", + " fig.subplots_adjust(top=1 - 0.35 / figh, bottom=0.15 / figh, left=0.2, right=0.99)\n", + "\n", + " for ax, name in zip(axs, cmap_list):\n", + " ax.imshow(gradient, aspect=\"auto\", cmap=mpl.colormaps[name])\n", + " ax.text(\n", + " -0.01,\n", + " 0.5,\n", + " name,\n", + " va=\"center\",\n", + " ha=\"right\",\n", + " fontsize=10,\n", + " transform=ax.transAxes,\n", + " )\n", + "\n", + " # Turn off *all* ticks & spines, not just the ones with colormaps.\n", + " for ax in axs:\n", + " ax.set_axis_off()\n", + "\n", + "\n", + "plot_color_gradients([\"spring\", \"summer\", \"autumn\", \"winter\"])" ] } ], diff --git a/solution.ipynb b/solution.ipynb index 9b17243..bb11933 100644 --- a/solution.ipynb +++ b/solution.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "eeffd47d", + "id": "c25b89a0", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "d9b65175", + "id": "b896f444", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "025f7566", + "id": "4633760c", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7f4771f3", + "id": "05b99695", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "9d60bf35", + "id": "b124e6c9", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4acd91e9", + "id": "ce4006c6", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "9e70adfb", + "id": "3d2f6db9", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "aa1a58cf", + "id": "42341879", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "95d9fa51", + "id": "dbae9556", "metadata": { "tags": [ "solution" @@ -154,7 +154,7 @@ }, { "cell_type": "markdown", - "id": "a3326bfe", + "id": "391c84f7", "metadata": { "lines_to_next_cell": 0 }, @@ -165,7 +165,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19354115", + "id": "b06f0958", "metadata": {}, "outputs": [], "source": [ @@ -192,7 +192,7 @@ }, { "cell_type": "markdown", - "id": "ff293353", + "id": "9192b9c5", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -202,7 +202,7 @@ }, { "cell_type": "markdown", - "id": "c8f3e86d", + "id": "91671c93", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ 
-215,7 +215,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c6071dca", + "id": "b34f1344", "metadata": { "tags": [] }, @@ -233,7 +233,7 @@ }, { "cell_type": "markdown", - "id": "7be4fc86", + "id": "2c5a1bae", "metadata": { "tags": [] }, @@ -249,7 +249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e5f2cecc", + "id": "a30776a7", "metadata": { "tags": [ "solution" @@ -273,7 +273,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5395d4a2", + "id": "8b00e344", "metadata": { "tags": [] }, @@ -286,7 +286,7 @@ }, { "cell_type": "markdown", - "id": "f75d2492", + "id": "86339f4b", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -298,7 +298,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9d788363", + "id": "9aaa3fbf", "metadata": { "tags": [] }, @@ -326,7 +326,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56d5e9f3", + "id": "cdee9934", "metadata": { "tags": [] }, @@ -339,7 +339,7 @@ }, { "cell_type": "markdown", - "id": "4c28aff0", + "id": "f1c23e88", "metadata": { "lines_to_next_cell": 2 }, @@ -353,7 +353,7 @@ }, { "cell_type": "markdown", - "id": "e871c574", + "id": "5de8d1dd", "metadata": { "lines_to_next_cell": 0 }, @@ -366,7 +366,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9bd13121", + "id": "1a885c54", "metadata": {}, "outputs": [], "source": [ @@ -391,7 +391,7 @@ }, { "cell_type": "markdown", - "id": "e302a5ca", + "id": "857094c0", "metadata": { "lines_to_next_cell": 0 }, @@ -405,7 +405,7 @@ }, { "cell_type": "markdown", - "id": "844f37f0", + "id": "b8b4541c", "metadata": {}, "source": [ "\n", @@ -431,7 +431,7 @@ }, { "cell_type": "markdown", - "id": "d6e8e759", + "id": "bae82f1c", "metadata": {}, "source": [ "

      Task 2.3: Use random noise as a baseline

      \n", @@ -443,7 +443,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7135e83d", + "id": "bbc1a6ae", "metadata": { "tags": [ "solution" @@ -469,7 +469,7 @@ }, { "cell_type": "markdown", - "id": "6d560996", + "id": "a2118766", "metadata": { "tags": [] }, @@ -483,7 +483,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6920e975", + "id": "1ab3b1d9", "metadata": { "tags": [ "solution" @@ -513,7 +513,7 @@ }, { "cell_type": "markdown", - "id": "de810272", + "id": "23696f85", "metadata": { "tags": [] }, @@ -529,7 +529,7 @@ }, { "cell_type": "markdown", - "id": "7a6f89dc", + "id": "9e645510", "metadata": {}, "source": [ "

      BONUS Task: Using different attributions.

      \n", @@ -543,7 +543,7 @@ }, { "cell_type": "markdown", - "id": "f3e293c5", + "id": "86ccf43a", "metadata": {}, "source": [ "

      Checkpoint 2

      \n", @@ -563,7 +563,7 @@ }, { "cell_type": "markdown", - "id": "fb3048e1", + "id": "a2c90390", "metadata": { "lines_to_next_cell": 0 }, @@ -591,7 +591,7 @@ }, { "cell_type": "markdown", - "id": "88f77592", + "id": "0a3878d0", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -605,8 +605,8 @@ "\n", "We will not be using the random latent code (green, in the figure), so the model we use is made up of three networks:\n", "- The generator - this will be the bulk of the model, and will be responsible for transforming the images: we're going to use a `UNet`\n", - "- The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel`\n", "- The style encoder - this will be responsible for encoding the style of the image: we're going to use a `DenseModel`\n", + "- The discriminator - this will be responsible for telling the difference between real and fake images: we're going to use a `DenseModel`\n", "\n", "Let's start by creating these!" ] @@ -614,7 +614,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6742eca7", + "id": "57993293", "metadata": {}, "outputs": [], "source": [ @@ -646,7 +646,7 @@ }, { "cell_type": "markdown", - "id": "3ed703e3", + "id": "4156e52f", "metadata": { "lines_to_next_cell": 0 }, @@ -661,7 +661,7 @@ { "cell_type": "code", "execution_count": null, - "id": "981023f6", + "id": "789f35dd", "metadata": { "tags": [ "solution" @@ -678,7 +678,7 @@ }, { "cell_type": "markdown", - "id": "fdf97172", + "id": "157db656", "metadata": { "tags": [] }, @@ -693,7 +693,7 @@ }, { "cell_type": "markdown", - "id": "bcb08a78", + "id": "1205a0f7", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -710,7 +710,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fa38c00f", + "id": "234a4375", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -724,7 +724,7 @@ }, { "cell_type": "markdown", - "id": "fd541400", + "id": "ca1cd81b", "metadata": { "lines_to_next_cell": 0 }, @@ -735,7 +735,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1e91e416", + "id": "6188d433", "metadata": {}, "outputs": [], "source": [ @@ -745,7 +745,7 @@ }, { "cell_type": "markdown", - "id": "b0377825", + "id": "88d9c1c5", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -763,7 +763,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f4939a8d", + "id": "89ced942", "metadata": { "lines_to_next_cell": 0 }, @@ -775,7 +775,7 @@ }, { "cell_type": "markdown", - "id": "9a6ca642", + "id": "5cf0dc3f", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -794,7 +794,7 @@ { "cell_type": "code", "execution_count": null, - "id": "382fa57e", + "id": "ce0263c7", "metadata": {}, "outputs": [], "source": [ @@ -803,7 +803,7 @@ }, { "cell_type": "markdown", - "id": "ccf3b275", + "id": "51907ad5", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -819,7 +819,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0a424199", + "id": "f711c045", "metadata": {}, "outputs": [], "source": [ @@ -828,7 +828,7 @@ }, { "cell_type": "markdown", - "id": "fe335f99", + "id": "2aff857d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -840,7 +840,7 @@ { "cell_type": "code", "execution_count": null, - "id": "977ce4c8", + "id": "bf789cc4", "metadata": {}, "outputs": [], "source": [ @@ -853,7 +853,7 @@ }, { "cell_type": "markdown", - "id": "7bf8cb24", + "id": "1813a71a", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -867,7 +867,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0d5ee1e1", + "id": 
"fb1b2b51", "metadata": {}, "outputs": [], "source": [ @@ -879,7 +879,7 @@ }, { "cell_type": "markdown", - "id": "43c93243", + "id": "af26f4ca", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -899,7 +899,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9a789ba4", + "id": "9a90d3ad", "metadata": {}, "outputs": [], "source": [ @@ -923,7 +923,7 @@ { "cell_type": "code", "execution_count": null, - "id": "91b15866", + "id": "9b109750", "metadata": {}, "outputs": [], "source": [ @@ -933,7 +933,7 @@ }, { "cell_type": "markdown", - "id": "87d5a8a1", + "id": "6e9159d6", "metadata": {}, "source": [ "

      Checkpoint 3

      \n", @@ -952,7 +952,7 @@ }, { "cell_type": "markdown", - "id": "3a1e7dd9", + "id": "d84d7806", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -962,10 +962,10 @@ "You were given several different options in the training code below. In each case, one of the options will work, and the other will not.\n", "Comment out the option that you think will not work.\n", "
        \n", - "
      • Choose the values for `set_requires_grad`. Hint: which part of the code is training the generator? Which part is training the discriminator
      • \n", - "
      • Choose the values of `set_requires_grad`, again. Hint: you may want to switch
      • \n", + "
      • Choose the values for set_requires_grad. Hint: which part of the code is training the generator? Which part is training the discriminator
      • \n", + "
      • Choose the values of set_requires_grad, again. Hint: you may want to switch
      • \n", "
      • Choose the sign of the discriminator loss. Hint: what does the discriminator want to do?
      • \n", - ".
      • Apply the EMA update. Hint: which model do you want to update? You can look again at the code we wrote above.
      • \n", + "
      • Apply the EMA update. Hint: which model do you want to update? You can look again at the code we wrote above.
      • \n", "
      \n", "Let's train the StarGAN one batch a time.\n", "While you watch the model train, consider whether you think it will be successful at generating counterfactuals in the number of steps we give it. What is the minimum number of iterations you think are needed for this to work, and how much time do yo uthink it will take?\n", @@ -974,7 +974,7 @@ }, { "cell_type": "markdown", - "id": "c6348be7", + "id": "d5a5a4c4", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -986,7 +986,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e67449b2", + "id": "bb842251", "metadata": { "lines_to_next_cell": 2, "tags": [ @@ -1056,7 +1056,7 @@ }, { "cell_type": "markdown", - "id": "f6cd135b", + "id": "5aba6a7f", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1068,7 +1068,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cdecbbbe", + "id": "537f4e6b", "metadata": {}, "outputs": [], "source": [ @@ -1084,7 +1084,7 @@ }, { "cell_type": "markdown", - "id": "2c7d13e4", + "id": "605bd7ae", "metadata": { "tags": [] }, @@ -1099,7 +1099,7 @@ }, { "cell_type": "markdown", - "id": "689db81d", + "id": "fe825a06", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1111,7 +1111,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c3f514fa", + "id": "4a02eb13", "metadata": {}, "outputs": [], "source": [ @@ -1133,7 +1133,7 @@ }, { "cell_type": "markdown", - "id": "27ee28e5", + "id": "622f387c", "metadata": { "tags": [] }, @@ -1149,7 +1149,7 @@ }, { "cell_type": "markdown", - "id": "e1c592af", + "id": "d51edfbb", "metadata": { "tags": [] }, @@ -1159,7 +1159,7 @@ }, { "cell_type": "markdown", - "id": "d2374be1", + "id": "e62c728e", "metadata": { "tags": [] }, @@ -1176,7 +1176,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c038ee90", + "id": "7cbb0b43", "metadata": { "title": "Loading the test dataset" }, @@ -1196,7 +1196,7 @@ }, { "cell_type": "markdown", - "id": "2dd7e283", + "id": "ea8fd639", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1208,7 +1208,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1910ed0f", + "id": "b2e876d1", "metadata": {}, "outputs": [], "source": [ @@ -1221,7 +1221,7 @@ }, { "cell_type": "markdown", - "id": "99188e3e", + "id": "c34f6d79", "metadata": { "lines_to_next_cell": 0 }, @@ -1231,7 +1231,7 @@ }, { "cell_type": "markdown", - "id": "fe2b4929", + "id": "4d27e43c", "metadata": { "lines_to_next_cell": 0 }, @@ -1249,7 +1249,7 @@ { "cell_type": "code", "execution_count": null, - "id": "99768fc9", + "id": "39b4c4a0", "metadata": { "tags": [ "solution" @@ -1266,27 +1266,27 @@ "predictions = []\n", "source_labels = []\n", "target_labels = []\n", + "with torch.inference_mode():\n", + " for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images):\n", + " for lbl in range(4):\n", + " # Create the counterfactual\n", + " x_fake = generator(\n", + " x.unsqueeze(0).to(device), prototypes[lbl].unsqueeze(0).to(device)\n", + " )\n", + " # Predict the class of the counterfactual image\n", + " pred = model(x_fake)\n", "\n", - "for i, (x, y) in tqdm(enumerate(random_test_mnist), total=num_images):\n", - " for lbl in range(4):\n", - " # Create the counterfactual\n", - " x_fake = generator(\n", - " x.unsqueeze(0).to(device), prototypes[lbl].unsqueeze(0).to(device)\n", - " )\n", - " # Predict the class of the counterfactual image\n", - " pred = model(x_fake)\n", - "\n", - " # Store the source and target labels\n", - " source_labels.append(y) # The original label of the image\n", - " target_labels.append(lbl) # The desired 
label of the counterfactual image\n", - " # Store the counterfactual image and prediction\n", - " counterfactuals[lbl][i] = x_fake.cpu().detach().numpy()\n", - " predictions.append(pred.argmax().item())" + " # Store the source and target labels\n", + " source_labels.append(y) # The original label of the image\n", + " target_labels.append(lbl) # The desired label of the counterfactual image\n", + " # Store the counterfactual image and prediction\n", + " counterfactuals[lbl][i] = x_fake.cpu().detach().numpy()\n", + " predictions.append(pred.argmax().item())" ] }, { "cell_type": "markdown", - "id": "ef56e941", + "id": "20059d76", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1298,7 +1298,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1612f8ce", + "id": "de15b0fe", "metadata": {}, "outputs": [], "source": [ @@ -1311,7 +1311,7 @@ }, { "cell_type": "markdown", - "id": "2239beb9", + "id": "07962ea2", "metadata": { "tags": [] }, @@ -1326,7 +1326,7 @@ }, { "cell_type": "markdown", - "id": "168352ff", + "id": "1590ce72", "metadata": { "tags": [] }, @@ -1337,7 +1337,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f24cdc48", + "id": "ccaee798", "metadata": {}, "outputs": [], "source": [ @@ -1351,7 +1351,7 @@ }, { "cell_type": "markdown", - "id": "b093ffc8", + "id": "e309a547", "metadata": { "tags": [] }, @@ -1366,7 +1366,7 @@ }, { "cell_type": "markdown", - "id": "be5c5a98", + "id": "11aa4518", "metadata": { "lines_to_next_cell": 0 }, @@ -1381,7 +1381,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0891fcf7", + "id": "6d87fada", "metadata": {}, "outputs": [], "source": [ @@ -1402,7 +1402,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b080dfe1", + "id": "1ea7e8d5", "metadata": { "title": "Another visualization function" }, @@ -1431,7 +1431,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e41e00c9", + "id": "21f1fef5", "metadata": { "lines_to_next_cell": 0 }, @@ -1447,7 +1447,7 @@ }, { "cell_type": "markdown", - "id": "ac7678a7", + "id": "2b579f39", "metadata": { "lines_to_next_cell": 0 }, @@ -1463,7 +1463,7 @@ }, { "cell_type": "markdown", - "id": "3e06986c", + "id": "f623afc1", "metadata": { "lines_to_next_cell": 0 }, @@ -1478,7 +1478,7 @@ }, { "cell_type": "markdown", - "id": "7c9a99c4", + "id": "da16de09", "metadata": { "lines_to_next_cell": 0 }, @@ -1492,7 +1492,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b1affa61", + "id": "80e4a662", "metadata": { "lines_to_next_cell": 0 }, @@ -1512,7 +1512,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4bf0e982", + "id": "54fdb6f2", "metadata": { "lines_to_next_cell": 0 }, @@ -1548,7 +1548,7 @@ }, { "cell_type": "markdown", - "id": "7f8f6ca5", + "id": "a0adcd59", "metadata": { "lines_to_next_cell": 0 }, @@ -1562,7 +1562,7 @@ }, { "cell_type": "markdown", - "id": "537edaa0", + "id": "37d2cfdb", "metadata": { "lines_to_next_cell": 0 }, @@ -1578,7 +1578,7 @@ }, { "cell_type": "markdown", - "id": "c4dd727d", + "id": "74fdb1ef", "metadata": { "lines_to_next_cell": 0 }, @@ -1594,7 +1594,7 @@ }, { "cell_type": "markdown", - "id": "c5a5e2c0", + "id": "b6260bf7", "metadata": { "lines_to_next_cell": 0 }, @@ -1617,7 +1617,7 @@ }, { "cell_type": "markdown", - "id": "a9ede52b", + "id": "5d15f240", "metadata": {}, "source": [ "

      Task 5.1: Explore the style space

      \n", @@ -1629,7 +1629,7 @@ { "cell_type": "code", "execution_count": null, - "id": "af4a9d95", + "id": "0d7f3699", "metadata": {}, "outputs": [], "source": [ @@ -1664,7 +1664,7 @@ }, { "cell_type": "markdown", - "id": "be742cb1", + "id": "10c742dc", "metadata": { "lines_to_next_cell": 0 }, @@ -1680,7 +1680,7 @@ { "cell_type": "code", "execution_count": null, - "id": "09032a46", + "id": "aeb2b7bf", "metadata": { "lines_to_next_cell": 0 }, @@ -1707,7 +1707,7 @@ }, { "cell_type": "markdown", - "id": "4d9a7277", + "id": "4305d760", "metadata": { "lines_to_next_cell": 0 }, @@ -1721,7 +1721,7 @@ }, { "cell_type": "markdown", - "id": "3016bf5c", + "id": "68c4a8e1", "metadata": { "lines_to_next_cell": 0 }, @@ -1738,7 +1738,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b47bc9a2", + "id": "a13a8561", "metadata": {}, "outputs": [], "source": [ @@ -1758,19 +1758,9 @@ "plt.show()" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "5434a70a", - "metadata": { - "lines_to_next_cell": 0 - }, - "outputs": [], - "source": [] - }, { "cell_type": "markdown", - "id": "9d066df4", + "id": "efd736b5", "metadata": {}, "source": [ "

      Questions

      \n", @@ -1782,7 +1772,7 @@ }, { "cell_type": "markdown", - "id": "e4295029", + "id": "9d4d9d16", "metadata": {}, "source": [ "

      Checkpoint 5

      \n", @@ -1800,37 +1790,24 @@ }, { "cell_type": "markdown", - "id": "3cd7976f", - "metadata": { - "lines_to_next_cell": 0 - }, + "id": "ec8f2497", + "metadata": {}, "source": [ "# Bonus!\n", "If you have extra time, you can try to break the StarGAN!\n", "There are a lot of little things that we did to make sure that it runs correctly - but what if we didn't?\n", "Some things you might want to try:\n", - "- What happens if you don't use the EMA model?\n", - "- What happens if you change the learning rates?\n", - "- What happens if you add a Sigmoid activation to the output of the style encoder?\n", + "
    • What happens if you don't use the EMA model?\n",
    • What happens if you change the learning rates?\n",
    • What happens if you add a Sigmoid activation to the output of the style encoder?
    
    • \n", "See what else you can think of, and see how finnicky training a GAN can be!\n", "\n", "# %% [markdown] tags=[\"solution\"]\n", "The colors for the classes are sampled from matplotlib colormaps! They are the four seasons: spring, summer, autumn, and winter.\n", - "Check your style space again to see if you can see the patterns now!" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ffeee5b2", - "metadata": { - "tags": [ - "solution" - ] - }, - "outputs": [], - "source": [ - "# Let's plot the colormaps\n", + "Check your style space again to see if you can see the patterns now!\n", + "\n", + "# %% tags=[\"solution\"]\n", + "Let's plot the colormaps\n", "import matplotlib as mpl\n", "import numpy as np\n", "\n", From a603977547dda2ff7e44cbbbc892eb65857ffece Mon Sep 17 00:00:00 2001 From: Larissa Heinrich Date: Mon, 26 Aug 2024 20:32:16 +0000 Subject: [PATCH 06/11] add checkpoint 1 for clarity, remove the extra we added --- solution.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/solution.py b/solution.py index 1768933..ee6ad15 100644 --- a/solution.py +++ b/solution.py @@ -112,7 +112,15 @@ plt.ylabel("True") plt.xlabel("Predicted") plt.show() - +# %% [markdown] +#

      Checkpoint 1

    +
    # +# At this point we have: +# +# - Loaded a classifier that classifies MNIST-like images by color, but we don't know how! +# +# We will not stop here as a group, it's just the end of Part 1. So continue on with part 2 right away. +#
    
      # %% [markdown] # # Part 2: Using Integrated Gradients to find what the classifier knows # @@ -560,19 +568,6 @@ def copy_parameters(source_model, target_model): generator_ema = Generator(deepcopy(unet), style_encoder=deepcopy(style_encoder)) generator_ema = generator_ema.to(device) -# %% [markdown] -#

      Checkpoint 3

      -# Put up your green sticky note when you've reached this point! -# -# At this point we have: -# -# - Loaded a classifier that classifies MNIST-like images by color, but we don't know how! -# - Tried applying Integrated Gradients to find out what the classifier is looking at - with little success. -# - Discovered the effect of changing the baseline on the output of integrated gradients. -# - Defined the hyperparameters for a StarGAN to create counterfactual images. -# -# Next up, we will define the training loop for the StarGAN. -#
      # %% [markdown] tags=[] #

      Task 3.3: Training!

      From ad563211f192be52fc519399f00364d2d9c7e852 Mon Sep 17 00:00:00 2001 From: neptunes5thmoon Date: Mon, 26 Aug 2024 20:33:07 +0000 Subject: [PATCH 07/11] Commit from GitHub Actions (Build Notebooks) --- exercise.ipynb | 246 +++++++++++++++++++++++++------------------------ solution.ipynb | 246 +++++++++++++++++++++++++------------------------ 2 files changed, 248 insertions(+), 244 deletions(-) diff --git a/exercise.ipynb b/exercise.ipynb index e8f9a5e..ab54c2d 100644 --- a/exercise.ipynb +++ b/exercise.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "c25b89a0", + "id": "0e12b745", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "b896f444", + "id": "a5a4c73b", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "4633760c", + "id": "caee6e2d", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "05b99695", + "id": "60be885c", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "b124e6c9", + "id": "17dfe28f", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ce4006c6", + "id": "e22f0615", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "3d2f6db9", + "id": "b69e6a45", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "42341879", + "id": "37df023f", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d4f52196", + "id": "a98e6cb7", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -155,7 +155,7 @@ }, { "cell_type": "markdown", - "id": "391c84f7", + "id": "250e04ae", "metadata": { "lines_to_next_cell": 0 }, @@ -166,8 +166,10 @@ { "cell_type": "code", "execution_count": null, - "id": "b06f0958", - "metadata": {}, + "id": "c0bf76da", + "metadata": { + "lines_to_next_cell": 0 + }, "outputs": [], "source": [ "from torch.utils.data import DataLoader\n", @@ -193,7 +195,24 @@ }, { "cell_type": "markdown", - "id": "9192b9c5", + "id": "d3657082", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "

      Checkpoint 1

      \n", + "\n", + "At this point we have:\n", + "\n", + "- Loaded a classifier that classifies MNIST-like images by color, but we don't know how!\n", + "\n", + "We will not stop her as a group, it's just the end of Part 1. So continue on with part 2 right away.\n", + "
      " + ] + }, + { + "cell_type": "markdown", + "id": "d7d6d20b", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -203,7 +222,7 @@ }, { "cell_type": "markdown", - "id": "91671c93", + "id": "2651461e", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -216,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b34f1344", + "id": "0a6c14a4", "metadata": { "tags": [] }, @@ -234,7 +253,7 @@ }, { "cell_type": "markdown", - "id": "2c5a1bae", + "id": "ea5ae280", "metadata": { "tags": [] }, @@ -250,7 +269,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ac553834", + "id": "c3089484", "metadata": { "tags": [ "task" @@ -271,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8b00e344", + "id": "48053a52", "metadata": { "tags": [] }, @@ -284,7 +303,7 @@ }, { "cell_type": "markdown", - "id": "86339f4b", + "id": "a171cfd0", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -296,7 +315,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9aaa3fbf", + "id": "5f1fa7f4", "metadata": { "tags": [] }, @@ -324,7 +343,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cdee9934", + "id": "6d30759d", "metadata": { "tags": [] }, @@ -337,7 +356,7 @@ }, { "cell_type": "markdown", - "id": "f1c23e88", + "id": "201452d4", "metadata": { "lines_to_next_cell": 2 }, @@ -351,7 +370,7 @@ }, { "cell_type": "markdown", - "id": "5de8d1dd", + "id": "212b9a38", "metadata": { "lines_to_next_cell": 0 }, @@ -364,7 +383,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1a885c54", + "id": "ac0d72aa", "metadata": {}, "outputs": [], "source": [ @@ -389,7 +408,7 @@ }, { "cell_type": "markdown", - "id": "857094c0", + "id": "445763f0", "metadata": { "lines_to_next_cell": 0 }, @@ -403,7 +422,7 @@ }, { "cell_type": "markdown", - "id": "b8b4541c", + "id": "d786f7a9", "metadata": {}, "source": [ "\n", @@ -429,7 +448,7 @@ }, { "cell_type": "markdown", - "id": "bae82f1c", + "id": "ee1ed4c2", "metadata": {}, "source": [ "

      Task 2.3: Use random noise as a baseline

      \n", @@ -441,7 +460,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3c0bdb51", + "id": "a2d9d5ff", "metadata": { "tags": [ "task" @@ -463,7 +482,7 @@ }, { "cell_type": "markdown", - "id": "a2118766", + "id": "1ba578d2", "metadata": { "tags": [] }, @@ -477,7 +496,7 @@ { "cell_type": "code", "execution_count": null, - "id": "51f7943a", + "id": "da1b4a9a", "metadata": { "tags": [ "task" @@ -501,7 +520,7 @@ }, { "cell_type": "markdown", - "id": "23696f85", + "id": "2f015ba4", "metadata": { "tags": [] }, @@ -517,7 +536,7 @@ }, { "cell_type": "markdown", - "id": "9e645510", + "id": "e087f05d", "metadata": {}, "source": [ "

      BONUS Task: Using different attributions.

      \n", @@ -531,7 +550,7 @@ }, { "cell_type": "markdown", - "id": "86ccf43a", + "id": "ae76eb75", "metadata": {}, "source": [ "

      Checkpoint 2

      \n", @@ -551,7 +570,7 @@ }, { "cell_type": "markdown", - "id": "a2c90390", + "id": "b2ec6177", "metadata": { "lines_to_next_cell": 0 }, @@ -579,7 +598,7 @@ }, { "cell_type": "markdown", - "id": "0a3878d0", + "id": "11737f5e", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -602,7 +621,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57993293", + "id": "382b5caf", "metadata": {}, "outputs": [], "source": [ @@ -634,7 +653,7 @@ }, { "cell_type": "markdown", - "id": "4156e52f", + "id": "09c1a43a", "metadata": { "lines_to_next_cell": 0 }, @@ -649,7 +668,7 @@ { "cell_type": "code", "execution_count": null, - "id": "02bc6f5a", + "id": "966f9091", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -670,7 +689,7 @@ }, { "cell_type": "markdown", - "id": "157db656", + "id": "e254f618", "metadata": { "tags": [] }, @@ -685,7 +704,7 @@ }, { "cell_type": "markdown", - "id": "1205a0f7", + "id": "9817ee9d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -702,7 +721,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9f3464a7", + "id": "199fadb0", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -716,7 +735,7 @@ }, { "cell_type": "markdown", - "id": "ca1cd81b", + "id": "5c1b84fa", "metadata": { "lines_to_next_cell": 0 }, @@ -727,7 +746,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6188d433", + "id": "35a1b628", "metadata": {}, "outputs": [], "source": [ @@ -737,7 +756,7 @@ }, { "cell_type": "markdown", - "id": "88d9c1c5", + "id": "0741cd51", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -755,7 +774,7 @@ { "cell_type": "code", "execution_count": null, - "id": "89ced942", + "id": "9ea04106", "metadata": { "lines_to_next_cell": 0 }, @@ -767,7 +786,7 @@ }, { "cell_type": "markdown", - "id": "5cf0dc3f", + "id": "df0aa4e4", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -786,7 +805,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ce0263c7", + "id": "888fdbee", "metadata": {}, "outputs": [], "source": [ @@ -795,7 +814,7 @@ }, { "cell_type": "markdown", - "id": "51907ad5", + "id": "3c6e2c10", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -811,7 +830,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f711c045", + "id": "c959fb2f", "metadata": {}, "outputs": [], "source": [ @@ -820,7 +839,7 @@ }, { "cell_type": "markdown", - "id": "2aff857d", + "id": "e7249572", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -832,7 +851,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bf789cc4", + "id": "17b8c4b3", "metadata": {}, "outputs": [], "source": [ @@ -845,7 +864,7 @@ }, { "cell_type": "markdown", - "id": "1813a71a", + "id": "fedb48d8", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -859,7 +878,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb1b2b51", + "id": "2ddc0100", "metadata": {}, "outputs": [], "source": [ @@ -871,7 +890,7 @@ }, { "cell_type": "markdown", - "id": "af26f4ca", + "id": "4d4d2810", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -891,7 +910,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9a90d3ad", + "id": "432f2f53", "metadata": {}, "outputs": [], "source": [ @@ -915,8 +934,10 @@ { "cell_type": "code", "execution_count": null, - "id": "9b109750", - "metadata": {}, + "id": "9d6397cf", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "generator_ema = Generator(deepcopy(unet), style_encoder=deepcopy(style_encoder))\n", @@ -925,26 +946,7 @@ }, { "cell_type": "markdown", - "id": "6e9159d6", - "metadata": {}, - "source": [ - "

      Checkpoint 3

      \n", - "Put up your green sticky note when you've reached this point!\n", - "\n", - "At this point we have:\n", - "\n", - "- Loaded a classifier that classifies MNIST-like images by color, but we don't know how!\n", - "- Tried applying Integrated Gradients to find out what the classifier is looking at - with little success.\n", - "- Discovered the effect of changing the baseline on the output of integrated gradients.\n", - "- Defined the hyperparameters for a StarGAN to create counterfactual images.\n", - "\n", - "Next up, we will define the training loop for the StarGAN.\n", - "
      " - ] - }, - { - "cell_type": "markdown", - "id": "d84d7806", + "id": "779bcf13", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -966,7 +968,7 @@ }, { "cell_type": "markdown", - "id": "d5a5a4c4", + "id": "752616b6", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -978,7 +980,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1990acdf", + "id": "c95ad770", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1089,7 +1091,7 @@ }, { "cell_type": "markdown", - "id": "5aba6a7f", + "id": "a7cdf035", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1101,7 +1103,7 @@ { "cell_type": "code", "execution_count": null, - "id": "537f4e6b", + "id": "4bd99022", "metadata": {}, "outputs": [], "source": [ @@ -1117,7 +1119,7 @@ }, { "cell_type": "markdown", - "id": "605bd7ae", + "id": "2ca0425e", "metadata": { "tags": [] }, @@ -1132,7 +1134,7 @@ }, { "cell_type": "markdown", - "id": "fe825a06", + "id": "8ed754f6", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1144,7 +1146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4a02eb13", + "id": "c90cd932", "metadata": {}, "outputs": [], "source": [ @@ -1166,7 +1168,7 @@ }, { "cell_type": "markdown", - "id": "622f387c", + "id": "23d27b74", "metadata": { "tags": [] }, @@ -1182,7 +1184,7 @@ }, { "cell_type": "markdown", - "id": "d51edfbb", + "id": "0afeb5a4", "metadata": { "tags": [] }, @@ -1192,7 +1194,7 @@ }, { "cell_type": "markdown", - "id": "e62c728e", + "id": "61e418f1", "metadata": { "tags": [] }, @@ -1209,7 +1211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7cbb0b43", + "id": "7b6c2411", "metadata": { "title": "Loading the test dataset" }, @@ -1229,7 +1231,7 @@ }, { "cell_type": "markdown", - "id": "ea8fd639", + "id": "26cd8983", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1241,7 +1243,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b2e876d1", + "id": "f1f5cc1e", "metadata": {}, "outputs": [], "source": [ @@ -1254,7 +1256,7 @@ }, { "cell_type": "markdown", - "id": "c34f6d79", + "id": "f3cd6599", "metadata": { "lines_to_next_cell": 0 }, @@ -1264,7 +1266,7 @@ }, { "cell_type": "markdown", - "id": "4d27e43c", + "id": "cefcba31", "metadata": { "lines_to_next_cell": 0 }, @@ -1282,7 +1284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "78d51072", + "id": "8b706e0a", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1318,7 +1320,7 @@ }, { "cell_type": "markdown", - "id": "20059d76", + "id": "bfda4b7f", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1330,7 +1332,7 @@ { "cell_type": "code", "execution_count": null, - "id": "de15b0fe", + "id": "a5b2f903", "metadata": {}, "outputs": [], "source": [ @@ -1343,7 +1345,7 @@ }, { "cell_type": "markdown", - "id": "07962ea2", + "id": "935009d9", "metadata": { "tags": [] }, @@ -1358,7 +1360,7 @@ }, { "cell_type": "markdown", - "id": "1590ce72", + "id": "7683e70e", "metadata": { "tags": [] }, @@ -1369,7 +1371,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ccaee798", + "id": "3532d6ed", "metadata": {}, "outputs": [], "source": [ @@ -1383,7 +1385,7 @@ }, { "cell_type": "markdown", - "id": "e309a547", + "id": "c8c90e6b", "metadata": { "tags": [] }, @@ -1398,7 +1400,7 @@ }, { "cell_type": "markdown", - "id": "11aa4518", + "id": "c4c3eff4", "metadata": { "lines_to_next_cell": 0 }, @@ -1413,7 +1415,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6d87fada", + "id": "1eff97c0", "metadata": {}, "outputs": [], "source": [ @@ -1434,7 +1436,7 @@ { "cell_type": "code", "execution_count": null, - 
"id": "1ea7e8d5", + "id": "c1281384", "metadata": { "title": "Another visualization function" }, @@ -1463,7 +1465,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21f1fef5", + "id": "42cf09ad", "metadata": { "lines_to_next_cell": 0 }, @@ -1479,7 +1481,7 @@ }, { "cell_type": "markdown", - "id": "2b579f39", + "id": "30ea4f96", "metadata": { "lines_to_next_cell": 0 }, @@ -1495,7 +1497,7 @@ }, { "cell_type": "markdown", - "id": "f623afc1", + "id": "6768c3d0", "metadata": { "lines_to_next_cell": 0 }, @@ -1510,7 +1512,7 @@ }, { "cell_type": "markdown", - "id": "da16de09", + "id": "0287a687", "metadata": { "lines_to_next_cell": 0 }, @@ -1524,7 +1526,7 @@ { "cell_type": "code", "execution_count": null, - "id": "80e4a662", + "id": "6459abb5", "metadata": { "lines_to_next_cell": 0 }, @@ -1544,7 +1546,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54fdb6f2", + "id": "ca40f5a7", "metadata": { "lines_to_next_cell": 0 }, @@ -1580,7 +1582,7 @@ }, { "cell_type": "markdown", - "id": "a0adcd59", + "id": "94276a21", "metadata": { "lines_to_next_cell": 0 }, @@ -1594,7 +1596,7 @@ }, { "cell_type": "markdown", - "id": "37d2cfdb", + "id": "a7597f89", "metadata": { "lines_to_next_cell": 0 }, @@ -1610,7 +1612,7 @@ }, { "cell_type": "markdown", - "id": "74fdb1ef", + "id": "9cb53f3f", "metadata": { "lines_to_next_cell": 0 }, @@ -1626,7 +1628,7 @@ }, { "cell_type": "markdown", - "id": "b6260bf7", + "id": "6a43a957", "metadata": { "lines_to_next_cell": 0 }, @@ -1649,7 +1651,7 @@ }, { "cell_type": "markdown", - "id": "5d15f240", + "id": "50e69413", "metadata": {}, "source": [ "

      Task 5.1: Explore the style space

      \n", @@ -1661,7 +1663,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0d7f3699", + "id": "c71b768e", "metadata": {}, "outputs": [], "source": [ @@ -1696,7 +1698,7 @@ }, { "cell_type": "markdown", - "id": "10c742dc", + "id": "a4676c24", "metadata": { "lines_to_next_cell": 0 }, @@ -1712,7 +1714,7 @@ { "cell_type": "code", "execution_count": null, - "id": "aeb2b7bf", + "id": "b5838e7c", "metadata": { "lines_to_next_cell": 0 }, @@ -1739,7 +1741,7 @@ }, { "cell_type": "markdown", - "id": "4305d760", + "id": "2aff4255", "metadata": { "lines_to_next_cell": 0 }, @@ -1753,7 +1755,7 @@ }, { "cell_type": "markdown", - "id": "68c4a8e1", + "id": "a80963e3", "metadata": { "lines_to_next_cell": 0 }, @@ -1770,7 +1772,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a13a8561", + "id": "65e070c3", "metadata": {}, "outputs": [], "source": [ @@ -1792,7 +1794,7 @@ }, { "cell_type": "markdown", - "id": "efd736b5", + "id": "6f5d93bb", "metadata": {}, "source": [ "

      Questions

      \n", @@ -1804,7 +1806,7 @@ }, { "cell_type": "markdown", - "id": "9d4d9d16", + "id": "1c2bf24d", "metadata": {}, "source": [ "

      Checkpoint 5

      \n", @@ -1822,7 +1824,7 @@ }, { "cell_type": "markdown", - "id": "ec8f2497", + "id": "c5cb1ade", "metadata": {}, "source": [ "# Bonus!\n", diff --git a/solution.ipynb b/solution.ipynb index bb11933..8e26901 100644 --- a/solution.ipynb +++ b/solution.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "c25b89a0", + "id": "0e12b745", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "b896f444", + "id": "a5a4c73b", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "4633760c", + "id": "caee6e2d", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "05b99695", + "id": "60be885c", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "b124e6c9", + "id": "17dfe28f", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ce4006c6", + "id": "e22f0615", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "3d2f6db9", + "id": "b69e6a45", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "42341879", + "id": "37df023f", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dbae9556", + "id": "9f9053c6", "metadata": { "tags": [ "solution" @@ -154,7 +154,7 @@ }, { "cell_type": "markdown", - "id": "391c84f7", + "id": "250e04ae", "metadata": { "lines_to_next_cell": 0 }, @@ -165,8 +165,10 @@ { "cell_type": "code", "execution_count": null, - "id": "b06f0958", - "metadata": {}, + "id": "c0bf76da", + "metadata": { + "lines_to_next_cell": 0 + }, "outputs": [], "source": [ "from torch.utils.data import DataLoader\n", @@ -192,7 +194,24 @@ }, { "cell_type": "markdown", - "id": "9192b9c5", + "id": "d3657082", + "metadata": { + "lines_to_next_cell": 0 + }, + "source": [ + "

      Checkpoint 1

      \n", + "\n", + "At this point we have:\n", + "\n", + "- Loaded a classifier that classifies MNIST-like images by color, but we don't know how!\n", + "\n", + "We will not stop her as a group, it's just the end of Part 1. So continue on with part 2 right away.\n", + "
      " + ] + }, + { + "cell_type": "markdown", + "id": "d7d6d20b", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -202,7 +221,7 @@ }, { "cell_type": "markdown", - "id": "91671c93", + "id": "2651461e", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -215,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b34f1344", + "id": "0a6c14a4", "metadata": { "tags": [] }, @@ -233,7 +252,7 @@ }, { "cell_type": "markdown", - "id": "2c5a1bae", + "id": "ea5ae280", "metadata": { "tags": [] }, @@ -249,7 +268,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a30776a7", + "id": "7eb845d8", "metadata": { "tags": [ "solution" @@ -273,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8b00e344", + "id": "48053a52", "metadata": { "tags": [] }, @@ -286,7 +305,7 @@ }, { "cell_type": "markdown", - "id": "86339f4b", + "id": "a171cfd0", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -298,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9aaa3fbf", + "id": "5f1fa7f4", "metadata": { "tags": [] }, @@ -326,7 +345,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cdee9934", + "id": "6d30759d", "metadata": { "tags": [] }, @@ -339,7 +358,7 @@ }, { "cell_type": "markdown", - "id": "f1c23e88", + "id": "201452d4", "metadata": { "lines_to_next_cell": 2 }, @@ -353,7 +372,7 @@ }, { "cell_type": "markdown", - "id": "5de8d1dd", + "id": "212b9a38", "metadata": { "lines_to_next_cell": 0 }, @@ -366,7 +385,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1a885c54", + "id": "ac0d72aa", "metadata": {}, "outputs": [], "source": [ @@ -391,7 +410,7 @@ }, { "cell_type": "markdown", - "id": "857094c0", + "id": "445763f0", "metadata": { "lines_to_next_cell": 0 }, @@ -405,7 +424,7 @@ }, { "cell_type": "markdown", - "id": "b8b4541c", + "id": "d786f7a9", "metadata": {}, "source": [ "\n", @@ -431,7 +450,7 @@ }, { "cell_type": "markdown", - "id": "bae82f1c", + "id": "ee1ed4c2", "metadata": {}, "source": [ "

      Task 2.3: Use random noise as a baseline

      \n", @@ -443,7 +462,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bbc1a6ae", + "id": "a3d5d688", "metadata": { "tags": [ "solution" @@ -469,7 +488,7 @@ }, { "cell_type": "markdown", - "id": "a2118766", + "id": "1ba578d2", "metadata": { "tags": [] }, @@ -483,7 +502,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1ab3b1d9", + "id": "9e7deaa3", "metadata": { "tags": [ "solution" @@ -513,7 +532,7 @@ }, { "cell_type": "markdown", - "id": "23696f85", + "id": "2f015ba4", "metadata": { "tags": [] }, @@ -529,7 +548,7 @@ }, { "cell_type": "markdown", - "id": "9e645510", + "id": "e087f05d", "metadata": {}, "source": [ "

      BONUS Task: Using different attributions.

      \n", @@ -543,7 +562,7 @@ }, { "cell_type": "markdown", - "id": "86ccf43a", + "id": "ae76eb75", "metadata": {}, "source": [ "

      Checkpoint 2

      \n", @@ -563,7 +582,7 @@ }, { "cell_type": "markdown", - "id": "a2c90390", + "id": "b2ec6177", "metadata": { "lines_to_next_cell": 0 }, @@ -591,7 +610,7 @@ }, { "cell_type": "markdown", - "id": "0a3878d0", + "id": "11737f5e", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -614,7 +633,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57993293", + "id": "382b5caf", "metadata": {}, "outputs": [], "source": [ @@ -646,7 +665,7 @@ }, { "cell_type": "markdown", - "id": "4156e52f", + "id": "09c1a43a", "metadata": { "lines_to_next_cell": 0 }, @@ -661,7 +680,7 @@ { "cell_type": "code", "execution_count": null, - "id": "789f35dd", + "id": "7371a75e", "metadata": { "tags": [ "solution" @@ -678,7 +697,7 @@ }, { "cell_type": "markdown", - "id": "157db656", + "id": "e254f618", "metadata": { "tags": [] }, @@ -693,7 +712,7 @@ }, { "cell_type": "markdown", - "id": "1205a0f7", + "id": "9817ee9d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -710,7 +729,7 @@ { "cell_type": "code", "execution_count": null, - "id": "234a4375", + "id": "68709b9f", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -724,7 +743,7 @@ }, { "cell_type": "markdown", - "id": "ca1cd81b", + "id": "5c1b84fa", "metadata": { "lines_to_next_cell": 0 }, @@ -735,7 +754,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6188d433", + "id": "35a1b628", "metadata": {}, "outputs": [], "source": [ @@ -745,7 +764,7 @@ }, { "cell_type": "markdown", - "id": "88d9c1c5", + "id": "0741cd51", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -763,7 +782,7 @@ { "cell_type": "code", "execution_count": null, - "id": "89ced942", + "id": "9ea04106", "metadata": { "lines_to_next_cell": 0 }, @@ -775,7 +794,7 @@ }, { "cell_type": "markdown", - "id": "5cf0dc3f", + "id": "df0aa4e4", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -794,7 +813,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ce0263c7", + "id": "888fdbee", "metadata": {}, "outputs": [], "source": [ @@ -803,7 +822,7 @@ }, { "cell_type": "markdown", - "id": "51907ad5", + "id": "3c6e2c10", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -819,7 +838,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f711c045", + "id": "c959fb2f", "metadata": {}, "outputs": [], "source": [ @@ -828,7 +847,7 @@ }, { "cell_type": "markdown", - "id": "2aff857d", + "id": "e7249572", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -840,7 +859,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bf789cc4", + "id": "17b8c4b3", "metadata": {}, "outputs": [], "source": [ @@ -853,7 +872,7 @@ }, { "cell_type": "markdown", - "id": "1813a71a", + "id": "fedb48d8", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -867,7 +886,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb1b2b51", + "id": "2ddc0100", "metadata": {}, "outputs": [], "source": [ @@ -879,7 +898,7 @@ }, { "cell_type": "markdown", - "id": "af26f4ca", + "id": "4d4d2810", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -899,7 +918,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9a90d3ad", + "id": "432f2f53", "metadata": {}, "outputs": [], "source": [ @@ -923,8 +942,10 @@ { "cell_type": "code", "execution_count": null, - "id": "9b109750", - "metadata": {}, + "id": "9d6397cf", + "metadata": { + "lines_to_next_cell": 2 + }, "outputs": [], "source": [ "generator_ema = Generator(deepcopy(unet), style_encoder=deepcopy(style_encoder))\n", @@ -933,26 +954,7 @@ }, { "cell_type": "markdown", - "id": "6e9159d6", - "metadata": {}, - "source": [ - "

      Checkpoint 3

      \n", - "Put up your green sticky note when you've reached this point!\n", - "\n", - "At this point we have:\n", - "\n", - "- Loaded a classifier that classifies MNIST-like images by color, but we don't know how!\n", - "- Tried applying Integrated Gradients to find out what the classifier is looking at - with little success.\n", - "- Discovered the effect of changing the baseline on the output of integrated gradients.\n", - "- Defined the hyperparameters for a StarGAN to create counterfactual images.\n", - "\n", - "Next up, we will define the training loop for the StarGAN.\n", - "
      " - ] - }, - { - "cell_type": "markdown", - "id": "d84d7806", + "id": "779bcf13", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -974,7 +976,7 @@ }, { "cell_type": "markdown", - "id": "d5a5a4c4", + "id": "752616b6", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -986,7 +988,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bb842251", + "id": "19d01864", "metadata": { "lines_to_next_cell": 2, "tags": [ @@ -1056,7 +1058,7 @@ }, { "cell_type": "markdown", - "id": "5aba6a7f", + "id": "a7cdf035", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1068,7 +1070,7 @@ { "cell_type": "code", "execution_count": null, - "id": "537f4e6b", + "id": "4bd99022", "metadata": {}, "outputs": [], "source": [ @@ -1084,7 +1086,7 @@ }, { "cell_type": "markdown", - "id": "605bd7ae", + "id": "2ca0425e", "metadata": { "tags": [] }, @@ -1099,7 +1101,7 @@ }, { "cell_type": "markdown", - "id": "fe825a06", + "id": "8ed754f6", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1111,7 +1113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4a02eb13", + "id": "c90cd932", "metadata": {}, "outputs": [], "source": [ @@ -1133,7 +1135,7 @@ }, { "cell_type": "markdown", - "id": "622f387c", + "id": "23d27b74", "metadata": { "tags": [] }, @@ -1149,7 +1151,7 @@ }, { "cell_type": "markdown", - "id": "d51edfbb", + "id": "0afeb5a4", "metadata": { "tags": [] }, @@ -1159,7 +1161,7 @@ }, { "cell_type": "markdown", - "id": "e62c728e", + "id": "61e418f1", "metadata": { "tags": [] }, @@ -1176,7 +1178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7cbb0b43", + "id": "7b6c2411", "metadata": { "title": "Loading the test dataset" }, @@ -1196,7 +1198,7 @@ }, { "cell_type": "markdown", - "id": "ea8fd639", + "id": "26cd8983", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1208,7 +1210,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b2e876d1", + "id": "f1f5cc1e", "metadata": {}, "outputs": [], "source": [ @@ -1221,7 +1223,7 @@ }, { "cell_type": "markdown", - "id": "c34f6d79", + "id": "f3cd6599", "metadata": { "lines_to_next_cell": 0 }, @@ -1231,7 +1233,7 @@ }, { "cell_type": "markdown", - "id": "4d27e43c", + "id": "cefcba31", "metadata": { "lines_to_next_cell": 0 }, @@ -1249,7 +1251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39b4c4a0", + "id": "7f0d387c", "metadata": { "tags": [ "solution" @@ -1286,7 +1288,7 @@ }, { "cell_type": "markdown", - "id": "20059d76", + "id": "bfda4b7f", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1298,7 +1300,7 @@ { "cell_type": "code", "execution_count": null, - "id": "de15b0fe", + "id": "a5b2f903", "metadata": {}, "outputs": [], "source": [ @@ -1311,7 +1313,7 @@ }, { "cell_type": "markdown", - "id": "07962ea2", + "id": "935009d9", "metadata": { "tags": [] }, @@ -1326,7 +1328,7 @@ }, { "cell_type": "markdown", - "id": "1590ce72", + "id": "7683e70e", "metadata": { "tags": [] }, @@ -1337,7 +1339,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ccaee798", + "id": "3532d6ed", "metadata": {}, "outputs": [], "source": [ @@ -1351,7 +1353,7 @@ }, { "cell_type": "markdown", - "id": "e309a547", + "id": "c8c90e6b", "metadata": { "tags": [] }, @@ -1366,7 +1368,7 @@ }, { "cell_type": "markdown", - "id": "11aa4518", + "id": "c4c3eff4", "metadata": { "lines_to_next_cell": 0 }, @@ -1381,7 +1383,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6d87fada", + "id": "1eff97c0", "metadata": {}, "outputs": [], "source": [ @@ -1402,7 +1404,7 @@ { "cell_type": "code", "execution_count": null, - "id": 
"1ea7e8d5", + "id": "c1281384", "metadata": { "title": "Another visualization function" }, @@ -1431,7 +1433,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21f1fef5", + "id": "42cf09ad", "metadata": { "lines_to_next_cell": 0 }, @@ -1447,7 +1449,7 @@ }, { "cell_type": "markdown", - "id": "2b579f39", + "id": "30ea4f96", "metadata": { "lines_to_next_cell": 0 }, @@ -1463,7 +1465,7 @@ }, { "cell_type": "markdown", - "id": "f623afc1", + "id": "6768c3d0", "metadata": { "lines_to_next_cell": 0 }, @@ -1478,7 +1480,7 @@ }, { "cell_type": "markdown", - "id": "da16de09", + "id": "0287a687", "metadata": { "lines_to_next_cell": 0 }, @@ -1492,7 +1494,7 @@ { "cell_type": "code", "execution_count": null, - "id": "80e4a662", + "id": "6459abb5", "metadata": { "lines_to_next_cell": 0 }, @@ -1512,7 +1514,7 @@ { "cell_type": "code", "execution_count": null, - "id": "54fdb6f2", + "id": "ca40f5a7", "metadata": { "lines_to_next_cell": 0 }, @@ -1548,7 +1550,7 @@ }, { "cell_type": "markdown", - "id": "a0adcd59", + "id": "94276a21", "metadata": { "lines_to_next_cell": 0 }, @@ -1562,7 +1564,7 @@ }, { "cell_type": "markdown", - "id": "37d2cfdb", + "id": "a7597f89", "metadata": { "lines_to_next_cell": 0 }, @@ -1578,7 +1580,7 @@ }, { "cell_type": "markdown", - "id": "74fdb1ef", + "id": "9cb53f3f", "metadata": { "lines_to_next_cell": 0 }, @@ -1594,7 +1596,7 @@ }, { "cell_type": "markdown", - "id": "b6260bf7", + "id": "6a43a957", "metadata": { "lines_to_next_cell": 0 }, @@ -1617,7 +1619,7 @@ }, { "cell_type": "markdown", - "id": "5d15f240", + "id": "50e69413", "metadata": {}, "source": [ "

      Task 5.1: Explore the style space

      \n", @@ -1629,7 +1631,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0d7f3699", + "id": "c71b768e", "metadata": {}, "outputs": [], "source": [ @@ -1664,7 +1666,7 @@ }, { "cell_type": "markdown", - "id": "10c742dc", + "id": "a4676c24", "metadata": { "lines_to_next_cell": 0 }, @@ -1680,7 +1682,7 @@ { "cell_type": "code", "execution_count": null, - "id": "aeb2b7bf", + "id": "b5838e7c", "metadata": { "lines_to_next_cell": 0 }, @@ -1707,7 +1709,7 @@ }, { "cell_type": "markdown", - "id": "4305d760", + "id": "2aff4255", "metadata": { "lines_to_next_cell": 0 }, @@ -1721,7 +1723,7 @@ }, { "cell_type": "markdown", - "id": "68c4a8e1", + "id": "a80963e3", "metadata": { "lines_to_next_cell": 0 }, @@ -1738,7 +1740,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a13a8561", + "id": "65e070c3", "metadata": {}, "outputs": [], "source": [ @@ -1760,7 +1762,7 @@ }, { "cell_type": "markdown", - "id": "efd736b5", + "id": "6f5d93bb", "metadata": {}, "source": [ "

      Questions

      \n", @@ -1772,7 +1774,7 @@ }, { "cell_type": "markdown", - "id": "9d4d9d16", + "id": "1c2bf24d", "metadata": {}, "source": [ "

      Checkpoint 5

      \n", @@ -1790,7 +1792,7 @@ }, { "cell_type": "markdown", - "id": "ec8f2497", + "id": "c5cb1ade", "metadata": {}, "source": [ "# Bonus!\n", From 027566236350c6d3c8ef76c09b193bee4a81fbeb Mon Sep 17 00:00:00 2001 From: Larissa Heinrich Date: Mon, 26 Aug 2024 20:36:29 +0000 Subject: [PATCH 08/11] fix task numbering --- solution.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solution.py b/solution.py index ee6ad15..00e477c 100644 --- a/solution.py +++ b/solution.py @@ -1102,7 +1102,7 @@ def visualize_color_attribution_and_counterfactual( plt.show() # %% [markdown] -#

      Task 5.1: Adding color to the style space

      +#

      Task 5.2: Adding color to the style space

      # We know that color is important. Does interpreting the style space as colors help us understand better? # # Let's use the style space to color the PCA plot. @@ -1133,7 +1133,7 @@ def visualize_color_attribution_and_counterfactual( #
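              To make the idea concrete, here is a minimal sketch of one way to color the PCA scatter from the style space. The names `styles` (an (N, D) NumPy array of style-encoder outputs, with D >= 3) and `styles_pca` are assumptions for illustration, not names fixed by the exercise; if the styles are still torch tensors they would need a `.detach().cpu().numpy()` first.

              import numpy as np
              import matplotlib.pyplot as plt
              from sklearn.decomposition import PCA

              # 2D coordinates for the scatter plot
              styles_pca = PCA(n_components=2).fit_transform(styles)

              # Rescale the first three style dimensions to [0, 1] and use them as RGB colors
              rgb = styles[:, :3]
              rgb = (rgb - rgb.min(axis=0)) / (rgb.max(axis=0) - rgb.min(axis=0) + 1e-8)

              plt.scatter(styles_pca[:, 0], styles_pca[:, 1], c=rgb, s=5)
              plt.title("Style space PCA, colored by style values")
              plt.show()

              Any smooth mapping from style dimensions to RGB would do; the point is only to see whether nearby points in the PCA plot also get similar colors.
              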
    • Can you see any patterns in the colors? Is the space smooth, for example?
    • #
    # %% [markdown] -#

    Task 5.2: Using the images to color the style space

    +#

    Task 5.3: Using the images to color the style space

    # Finally, let's just use the colors from the images themselves! # The maximum value in the image (since they are "black-and-color") can be used as a color! # From 99c869d597c4434297e17ee10d5a4857c4bb8831 Mon Sep 17 00:00:00 2001 From: neptunes5thmoon Date: Mon, 26 Aug 2024 20:37:04 +0000 Subject: [PATCH 09/11] Commit from GitHub Actions (Build Notebooks) --- exercise.ipynb | 208 ++++++++++++++++++++++++------------------------- solution.ipynb | 208 ++++++++++++++++++++++++------------------------- 2 files changed, 208 insertions(+), 208 deletions(-) diff --git a/exercise.ipynb b/exercise.ipynb index ab54c2d..50d5adb 100644 --- a/exercise.ipynb +++ b/exercise.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "0e12b745", + "id": "be4e7a97", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "a5a4c73b", + "id": "a1e6c3cd", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "caee6e2d", + "id": "bc9d8f39", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60be885c", + "id": "5a28affe", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "17dfe28f", + "id": "3fb92e01", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e22f0615", + "id": "225052d7", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "b69e6a45", + "id": "a878ce13", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "37df023f", + "id": "1f8d5401", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a98e6cb7", + "id": "19f50056", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -155,7 +155,7 @@ }, { "cell_type": "markdown", - "id": "250e04ae", + "id": "6dd3913c", "metadata": { "lines_to_next_cell": 0 }, @@ -166,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c0bf76da", + "id": "060a05f5", "metadata": { "lines_to_next_cell": 0 }, @@ -195,7 +195,7 @@ }, { "cell_type": "markdown", - "id": "d3657082", + "id": "2616d093", "metadata": { "lines_to_next_cell": 0 }, @@ -212,7 +212,7 @@ }, { "cell_type": "markdown", - "id": "d7d6d20b", + "id": "76fae027", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -222,7 +222,7 @@ }, { "cell_type": "markdown", - "id": "2651461e", + "id": "5284eaf7", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -235,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0a6c14a4", + "id": "57eb7e30", "metadata": { "tags": [] }, @@ -253,7 +253,7 @@ }, { "cell_type": "markdown", - "id": "ea5ae280", + "id": "5dd749a7", "metadata": { "tags": [] }, @@ -269,7 +269,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c3089484", + "id": "7d56e7b9", "metadata": { "tags": [ "task" @@ -290,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48053a52", + "id": "f1942f36", "metadata": { "tags": [] }, @@ -303,7 +303,7 @@ }, { "cell_type": "markdown", - "id": "a171cfd0", + "id": "7e5d1815", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -315,7 +315,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5f1fa7f4", + "id": "d5103ffb", "metadata": { "tags": [] }, @@ -343,7 +343,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "6d30759d", + "id": "858a7c14", "metadata": { "tags": [] }, @@ -356,7 +356,7 @@ }, { "cell_type": "markdown", - "id": "201452d4", + "id": "78c186ef", "metadata": { "lines_to_next_cell": 2 }, @@ -370,7 +370,7 @@ }, { "cell_type": "markdown", - "id": "212b9a38", + "id": "5e3aa105", "metadata": { "lines_to_next_cell": 0 }, @@ -383,7 +383,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ac0d72aa", + "id": "8171ef5a", "metadata": {}, "outputs": [], "source": [ @@ -408,7 +408,7 @@ }, { "cell_type": "markdown", - "id": "445763f0", + "id": "0b65a750", "metadata": { "lines_to_next_cell": 0 }, @@ -422,7 +422,7 @@ }, { "cell_type": "markdown", - "id": "d786f7a9", + "id": "06d9fccd", "metadata": {}, "source": [ "\n", @@ -448,7 +448,7 @@ }, { "cell_type": "markdown", - "id": "ee1ed4c2", + "id": "1cf3195f", "metadata": {}, "source": [ "

    Task 2.3: Use random noise as a baseline

    \n", @@ -460,7 +460,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a2d9d5ff", + "id": "fbefdb10", "metadata": { "tags": [ "task" @@ -482,7 +482,7 @@ }, { "cell_type": "markdown", - "id": "1ba578d2", + "id": "e31bd3cf", "metadata": { "tags": [] }, @@ -496,7 +496,7 @@ { "cell_type": "code", "execution_count": null, - "id": "da1b4a9a", + "id": "98de5d82", "metadata": { "tags": [ "task" @@ -520,7 +520,7 @@ }, { "cell_type": "markdown", - "id": "2f015ba4", + "id": "290bdbf3", "metadata": { "tags": [] }, @@ -536,7 +536,7 @@ }, { "cell_type": "markdown", - "id": "e087f05d", + "id": "659c5758", "metadata": {}, "source": [ "

    BONUS Task: Using different attributions.

    \n", @@ -550,7 +550,7 @@ }, { "cell_type": "markdown", - "id": "ae76eb75", + "id": "b93071a2", "metadata": {}, "source": [ "

    Checkpoint 2

    \n", @@ -570,7 +570,7 @@ }, { "cell_type": "markdown", - "id": "b2ec6177", + "id": "e9f2b7ae", "metadata": { "lines_to_next_cell": 0 }, @@ -598,7 +598,7 @@ }, { "cell_type": "markdown", - "id": "11737f5e", + "id": "92223fdc", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -621,7 +621,7 @@ { "cell_type": "code", "execution_count": null, - "id": "382b5caf", + "id": "5a72db0b", "metadata": {}, "outputs": [], "source": [ @@ -653,7 +653,7 @@ }, { "cell_type": "markdown", - "id": "09c1a43a", + "id": "01335c40", "metadata": { "lines_to_next_cell": 0 }, @@ -668,7 +668,7 @@ { "cell_type": "code", "execution_count": null, - "id": "966f9091", + "id": "8d0f0203", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -689,7 +689,7 @@ }, { "cell_type": "markdown", - "id": "e254f618", + "id": "186f40a5", "metadata": { "tags": [] }, @@ -704,7 +704,7 @@ }, { "cell_type": "markdown", - "id": "9817ee9d", + "id": "af4cf127", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -721,7 +721,7 @@ { "cell_type": "code", "execution_count": null, - "id": "199fadb0", + "id": "94f07b70", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -735,7 +735,7 @@ }, { "cell_type": "markdown", - "id": "5c1b84fa", + "id": "c2a072a4", "metadata": { "lines_to_next_cell": 0 }, @@ -746,7 +746,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35a1b628", + "id": "f7bb217b", "metadata": {}, "outputs": [], "source": [ @@ -756,7 +756,7 @@ }, { "cell_type": "markdown", - "id": "0741cd51", + "id": "dfdd1272", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -774,7 +774,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9ea04106", + "id": "7ddd581a", "metadata": { "lines_to_next_cell": 0 }, @@ -786,7 +786,7 @@ }, { "cell_type": "markdown", - "id": "df0aa4e4", + "id": "2f7fe43c", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -805,7 +805,7 @@ { "cell_type": "code", "execution_count": null, - "id": "888fdbee", + "id": "f01095a7", "metadata": {}, "outputs": [], "source": [ @@ -814,7 +814,7 @@ }, { "cell_type": "markdown", - "id": "3c6e2c10", + "id": "bad091aa", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -830,7 +830,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c959fb2f", + "id": "e49c55e0", "metadata": {}, "outputs": [], "source": [ @@ -839,7 +839,7 @@ }, { "cell_type": "markdown", - "id": "e7249572", + "id": "af990b9a", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -851,7 +851,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17b8c4b3", + "id": "291f2062", "metadata": {}, "outputs": [], "source": [ @@ -864,7 +864,7 @@ }, { "cell_type": "markdown", - "id": "fedb48d8", + "id": "919aea73", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -878,7 +878,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2ddc0100", + "id": "26f2a9c1", "metadata": {}, "outputs": [], "source": [ @@ -890,7 +890,7 @@ }, { "cell_type": "markdown", - "id": "4d4d2810", + "id": "54780ac3", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -910,7 +910,7 @@ { "cell_type": "code", "execution_count": null, - "id": "432f2f53", + "id": "e7187b29", "metadata": {}, "outputs": [], "source": [ @@ -934,7 +934,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9d6397cf", + "id": "78521f46", "metadata": { "lines_to_next_cell": 2 }, @@ -946,7 +946,7 @@ }, { "cell_type": "markdown", - "id": "779bcf13", + "id": "036e6086", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -968,7 +968,7 @@ }, { "cell_type": "markdown", - "id": "752616b6", + "id": "3b756b7a", "metadata": { 
"lines_to_next_cell": 0, "tags": [] @@ -980,7 +980,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c95ad770", + "id": "20847a1b", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1091,7 +1091,7 @@ }, { "cell_type": "markdown", - "id": "a7cdf035", + "id": "78403aaa", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1103,7 +1103,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4bd99022", + "id": "9bb6c797", "metadata": {}, "outputs": [], "source": [ @@ -1119,7 +1119,7 @@ }, { "cell_type": "markdown", - "id": "2ca0425e", + "id": "c32cbd8b", "metadata": { "tags": [] }, @@ -1134,7 +1134,7 @@ }, { "cell_type": "markdown", - "id": "8ed754f6", + "id": "4c86dd42", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1146,7 +1146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c90cd932", + "id": "8f728ded", "metadata": {}, "outputs": [], "source": [ @@ -1168,7 +1168,7 @@ }, { "cell_type": "markdown", - "id": "23d27b74", + "id": "028d3bbc", "metadata": { "tags": [] }, @@ -1184,7 +1184,7 @@ }, { "cell_type": "markdown", - "id": "0afeb5a4", + "id": "b63aac86", "metadata": { "tags": [] }, @@ -1194,7 +1194,7 @@ }, { "cell_type": "markdown", - "id": "61e418f1", + "id": "b0ad5935", "metadata": { "tags": [] }, @@ -1211,7 +1211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7b6c2411", + "id": "0cf2b9a5", "metadata": { "title": "Loading the test dataset" }, @@ -1231,7 +1231,7 @@ }, { "cell_type": "markdown", - "id": "26cd8983", + "id": "35cc9b35", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1243,7 +1243,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f1f5cc1e", + "id": "4146969c", "metadata": {}, "outputs": [], "source": [ @@ -1256,7 +1256,7 @@ }, { "cell_type": "markdown", - "id": "f3cd6599", + "id": "f99f676c", "metadata": { "lines_to_next_cell": 0 }, @@ -1266,7 +1266,7 @@ }, { "cell_type": "markdown", - "id": "cefcba31", + "id": "a7450339", "metadata": { "lines_to_next_cell": 0 }, @@ -1284,7 +1284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8b706e0a", + "id": "58973345", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1320,7 +1320,7 @@ }, { "cell_type": "markdown", - "id": "bfda4b7f", + "id": "20af6915", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1332,7 +1332,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a5b2f903", + "id": "4543762f", "metadata": {}, "outputs": [], "source": [ @@ -1345,7 +1345,7 @@ }, { "cell_type": "markdown", - "id": "935009d9", + "id": "d0b8a2ec", "metadata": { "tags": [] }, @@ -1360,7 +1360,7 @@ }, { "cell_type": "markdown", - "id": "7683e70e", + "id": "61e58d7f", "metadata": { "tags": [] }, @@ -1371,7 +1371,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3532d6ed", + "id": "b8dc4640", "metadata": {}, "outputs": [], "source": [ @@ -1385,7 +1385,7 @@ }, { "cell_type": "markdown", - "id": "c8c90e6b", + "id": "ed9b7104", "metadata": { "tags": [] }, @@ -1400,7 +1400,7 @@ }, { "cell_type": "markdown", - "id": "c4c3eff4", + "id": "0add70ab", "metadata": { "lines_to_next_cell": 0 }, @@ -1415,7 +1415,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1eff97c0", + "id": "274d8226", "metadata": {}, "outputs": [], "source": [ @@ -1436,7 +1436,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c1281384", + "id": "b6b54bbe", "metadata": { "title": "Another visualization function" }, @@ -1465,7 +1465,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42cf09ad", + "id": "979ca20f", "metadata": { "lines_to_next_cell": 
0 }, @@ -1481,7 +1481,7 @@ }, { "cell_type": "markdown", - "id": "30ea4f96", + "id": "b779b535", "metadata": { "lines_to_next_cell": 0 }, @@ -1497,7 +1497,7 @@ }, { "cell_type": "markdown", - "id": "6768c3d0", + "id": "c44f0cf1", "metadata": { "lines_to_next_cell": 0 }, @@ -1512,7 +1512,7 @@ }, { "cell_type": "markdown", - "id": "0287a687", + "id": "f402bb74", "metadata": { "lines_to_next_cell": 0 }, @@ -1526,7 +1526,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6459abb5", + "id": "12542a6d", "metadata": { "lines_to_next_cell": 0 }, @@ -1546,7 +1546,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ca40f5a7", + "id": "8d3e6d86", "metadata": { "lines_to_next_cell": 0 }, @@ -1582,7 +1582,7 @@ }, { "cell_type": "markdown", - "id": "94276a21", + "id": "ca1d1242", "metadata": { "lines_to_next_cell": 0 }, @@ -1596,7 +1596,7 @@ }, { "cell_type": "markdown", - "id": "a7597f89", + "id": "6f530e82", "metadata": { "lines_to_next_cell": 0 }, @@ -1612,7 +1612,7 @@ }, { "cell_type": "markdown", - "id": "9cb53f3f", + "id": "5904b20b", "metadata": { "lines_to_next_cell": 0 }, @@ -1628,7 +1628,7 @@ }, { "cell_type": "markdown", - "id": "6a43a957", + "id": "72ca6d87", "metadata": { "lines_to_next_cell": 0 }, @@ -1651,7 +1651,7 @@ }, { "cell_type": "markdown", - "id": "50e69413", + "id": "a0baa037", "metadata": {}, "source": [ "

    Task 5.1: Explore the style space

    \n", @@ -1663,7 +1663,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c71b768e", + "id": "bc0062db", "metadata": {}, "outputs": [], "source": [ @@ -1698,12 +1698,12 @@ }, { "cell_type": "markdown", - "id": "a4676c24", + "id": "ba428131", "metadata": { "lines_to_next_cell": 0 }, "source": [ - "

    Task 5.1: Adding color to the style space

    \n", + "

    Task 5.2: Adding color to the style space

    \n", "We know that color is important. Does interpreting the style space as colors help us understand better?\n", "\n", "Let's use the style space to color the PCA plot.\n", @@ -1714,7 +1714,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b5838e7c", + "id": "2ee2e061", "metadata": { "lines_to_next_cell": 0 }, @@ -1741,7 +1741,7 @@ }, { "cell_type": "markdown", - "id": "2aff4255", + "id": "a26903b8", "metadata": { "lines_to_next_cell": 0 }, @@ -1755,12 +1755,12 @@ }, { "cell_type": "markdown", - "id": "a80963e3", + "id": "7f72dfb5", "metadata": { "lines_to_next_cell": 0 }, "source": [ - "

    Task 5.2: Using the images to color the style space

    \n", + "

    Task 5.3: Using the images to color the style space

    \n", "Finally, let's just use the colors from the images themselves!\n", "The maximum value in the image (since they are \"black-and-color\") can be used as a color!\n", "\n", @@ -1772,7 +1772,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65e070c3", + "id": "be62a09b", "metadata": {}, "outputs": [], "source": [ @@ -1794,7 +1794,7 @@ }, { "cell_type": "markdown", - "id": "6f5d93bb", + "id": "f16385e4", "metadata": {}, "source": [ "

    Questions

    \n", @@ -1806,7 +1806,7 @@ }, { "cell_type": "markdown", - "id": "1c2bf24d", + "id": "e0f59eee", "metadata": {}, "source": [ "

    Checkpoint 5

    \n", @@ -1824,7 +1824,7 @@ }, { "cell_type": "markdown", - "id": "c5cb1ade", + "id": "0bbc6cd2", "metadata": {}, "source": [ "# Bonus!\n", diff --git a/solution.ipynb b/solution.ipynb index 8e26901..d4808b6 100644 --- a/solution.ipynb +++ b/solution.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "0e12b745", + "id": "be4e7a97", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "a5a4c73b", + "id": "a1e6c3cd", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "caee6e2d", + "id": "bc9d8f39", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60be885c", + "id": "5a28affe", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "17dfe28f", + "id": "3fb92e01", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e22f0615", + "id": "225052d7", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "b69e6a45", + "id": "a878ce13", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "37df023f", + "id": "1f8d5401", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9f9053c6", + "id": "dc1f9832", "metadata": { "tags": [ "solution" @@ -154,7 +154,7 @@ }, { "cell_type": "markdown", - "id": "250e04ae", + "id": "6dd3913c", "metadata": { "lines_to_next_cell": 0 }, @@ -165,7 +165,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c0bf76da", + "id": "060a05f5", "metadata": { "lines_to_next_cell": 0 }, @@ -194,7 +194,7 @@ }, { "cell_type": "markdown", - "id": "d3657082", + "id": "2616d093", "metadata": { "lines_to_next_cell": 0 }, @@ -211,7 +211,7 @@ }, { "cell_type": "markdown", - "id": "d7d6d20b", + "id": "76fae027", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -221,7 +221,7 @@ }, { "cell_type": "markdown", - "id": "2651461e", + "id": "5284eaf7", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -234,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0a6c14a4", + "id": "57eb7e30", "metadata": { "tags": [] }, @@ -252,7 +252,7 @@ }, { "cell_type": "markdown", - "id": "ea5ae280", + "id": "5dd749a7", "metadata": { "tags": [] }, @@ -268,7 +268,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7eb845d8", + "id": "9e77b469", "metadata": { "tags": [ "solution" @@ -292,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "48053a52", + "id": "f1942f36", "metadata": { "tags": [] }, @@ -305,7 +305,7 @@ }, { "cell_type": "markdown", - "id": "a171cfd0", + "id": "7e5d1815", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -317,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5f1fa7f4", + "id": "d5103ffb", "metadata": { "tags": [] }, @@ -345,7 +345,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6d30759d", + "id": "858a7c14", "metadata": { "tags": [] }, @@ -358,7 +358,7 @@ }, { "cell_type": "markdown", - "id": "201452d4", + "id": "78c186ef", "metadata": { "lines_to_next_cell": 2 }, @@ -372,7 +372,7 @@ }, { "cell_type": "markdown", - "id": "212b9a38", + "id": "5e3aa105", "metadata": { "lines_to_next_cell": 0 }, @@ -385,7 +385,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ac0d72aa", + 
"id": "8171ef5a", "metadata": {}, "outputs": [], "source": [ @@ -410,7 +410,7 @@ }, { "cell_type": "markdown", - "id": "445763f0", + "id": "0b65a750", "metadata": { "lines_to_next_cell": 0 }, @@ -424,7 +424,7 @@ }, { "cell_type": "markdown", - "id": "d786f7a9", + "id": "06d9fccd", "metadata": {}, "source": [ "\n", @@ -450,7 +450,7 @@ }, { "cell_type": "markdown", - "id": "ee1ed4c2", + "id": "1cf3195f", "metadata": {}, "source": [ "

    Task 2.3: Use random noise as a baseline

    \n", @@ -462,7 +462,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a3d5d688", + "id": "b09d2144", "metadata": { "tags": [ "solution" @@ -488,7 +488,7 @@ }, { "cell_type": "markdown", - "id": "1ba578d2", + "id": "e31bd3cf", "metadata": { "tags": [] }, @@ -502,7 +502,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9e7deaa3", + "id": "af1e6318", "metadata": { "tags": [ "solution" @@ -532,7 +532,7 @@ }, { "cell_type": "markdown", - "id": "2f015ba4", + "id": "290bdbf3", "metadata": { "tags": [] }, @@ -548,7 +548,7 @@ }, { "cell_type": "markdown", - "id": "e087f05d", + "id": "659c5758", "metadata": {}, "source": [ "

    BONUS Task: Using different attributions.

    \n", @@ -562,7 +562,7 @@ }, { "cell_type": "markdown", - "id": "ae76eb75", + "id": "b93071a2", "metadata": {}, "source": [ "

    Checkpoint 2

    \n", @@ -582,7 +582,7 @@ }, { "cell_type": "markdown", - "id": "b2ec6177", + "id": "e9f2b7ae", "metadata": { "lines_to_next_cell": 0 }, @@ -610,7 +610,7 @@ }, { "cell_type": "markdown", - "id": "11737f5e", + "id": "92223fdc", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -633,7 +633,7 @@ { "cell_type": "code", "execution_count": null, - "id": "382b5caf", + "id": "5a72db0b", "metadata": {}, "outputs": [], "source": [ @@ -665,7 +665,7 @@ }, { "cell_type": "markdown", - "id": "09c1a43a", + "id": "01335c40", "metadata": { "lines_to_next_cell": 0 }, @@ -680,7 +680,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7371a75e", + "id": "d86d3dcc", "metadata": { "tags": [ "solution" @@ -697,7 +697,7 @@ }, { "cell_type": "markdown", - "id": "e254f618", + "id": "186f40a5", "metadata": { "tags": [] }, @@ -712,7 +712,7 @@ }, { "cell_type": "markdown", - "id": "9817ee9d", + "id": "af4cf127", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -729,7 +729,7 @@ { "cell_type": "code", "execution_count": null, - "id": "68709b9f", + "id": "1f145891", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -743,7 +743,7 @@ }, { "cell_type": "markdown", - "id": "5c1b84fa", + "id": "c2a072a4", "metadata": { "lines_to_next_cell": 0 }, @@ -754,7 +754,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35a1b628", + "id": "f7bb217b", "metadata": {}, "outputs": [], "source": [ @@ -764,7 +764,7 @@ }, { "cell_type": "markdown", - "id": "0741cd51", + "id": "dfdd1272", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -782,7 +782,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9ea04106", + "id": "7ddd581a", "metadata": { "lines_to_next_cell": 0 }, @@ -794,7 +794,7 @@ }, { "cell_type": "markdown", - "id": "df0aa4e4", + "id": "2f7fe43c", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -813,7 +813,7 @@ { "cell_type": "code", "execution_count": null, - "id": "888fdbee", + "id": "f01095a7", "metadata": {}, "outputs": [], "source": [ @@ -822,7 +822,7 @@ }, { "cell_type": "markdown", - "id": "3c6e2c10", + "id": "bad091aa", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -838,7 +838,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c959fb2f", + "id": "e49c55e0", "metadata": {}, "outputs": [], "source": [ @@ -847,7 +847,7 @@ }, { "cell_type": "markdown", - "id": "e7249572", + "id": "af990b9a", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -859,7 +859,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17b8c4b3", + "id": "291f2062", "metadata": {}, "outputs": [], "source": [ @@ -872,7 +872,7 @@ }, { "cell_type": "markdown", - "id": "fedb48d8", + "id": "919aea73", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -886,7 +886,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2ddc0100", + "id": "26f2a9c1", "metadata": {}, "outputs": [], "source": [ @@ -898,7 +898,7 @@ }, { "cell_type": "markdown", - "id": "4d4d2810", + "id": "54780ac3", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -918,7 +918,7 @@ { "cell_type": "code", "execution_count": null, - "id": "432f2f53", + "id": "e7187b29", "metadata": {}, "outputs": [], "source": [ @@ -942,7 +942,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9d6397cf", + "id": "78521f46", "metadata": { "lines_to_next_cell": 2 }, @@ -954,7 +954,7 @@ }, { "cell_type": "markdown", - "id": "779bcf13", + "id": "036e6086", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -976,7 +976,7 @@ }, { "cell_type": "markdown", - "id": "752616b6", + "id": "3b756b7a", "metadata": { 
"lines_to_next_cell": 0, "tags": [] @@ -988,7 +988,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19d01864", + "id": "8256f4b5", "metadata": { "lines_to_next_cell": 2, "tags": [ @@ -1058,7 +1058,7 @@ }, { "cell_type": "markdown", - "id": "a7cdf035", + "id": "78403aaa", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1070,7 +1070,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4bd99022", + "id": "9bb6c797", "metadata": {}, "outputs": [], "source": [ @@ -1086,7 +1086,7 @@ }, { "cell_type": "markdown", - "id": "2ca0425e", + "id": "c32cbd8b", "metadata": { "tags": [] }, @@ -1101,7 +1101,7 @@ }, { "cell_type": "markdown", - "id": "8ed754f6", + "id": "4c86dd42", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1113,7 +1113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c90cd932", + "id": "8f728ded", "metadata": {}, "outputs": [], "source": [ @@ -1135,7 +1135,7 @@ }, { "cell_type": "markdown", - "id": "23d27b74", + "id": "028d3bbc", "metadata": { "tags": [] }, @@ -1151,7 +1151,7 @@ }, { "cell_type": "markdown", - "id": "0afeb5a4", + "id": "b63aac86", "metadata": { "tags": [] }, @@ -1161,7 +1161,7 @@ }, { "cell_type": "markdown", - "id": "61e418f1", + "id": "b0ad5935", "metadata": { "tags": [] }, @@ -1178,7 +1178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7b6c2411", + "id": "0cf2b9a5", "metadata": { "title": "Loading the test dataset" }, @@ -1198,7 +1198,7 @@ }, { "cell_type": "markdown", - "id": "26cd8983", + "id": "35cc9b35", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1210,7 +1210,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f1f5cc1e", + "id": "4146969c", "metadata": {}, "outputs": [], "source": [ @@ -1223,7 +1223,7 @@ }, { "cell_type": "markdown", - "id": "f3cd6599", + "id": "f99f676c", "metadata": { "lines_to_next_cell": 0 }, @@ -1233,7 +1233,7 @@ }, { "cell_type": "markdown", - "id": "cefcba31", + "id": "a7450339", "metadata": { "lines_to_next_cell": 0 }, @@ -1251,7 +1251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7f0d387c", + "id": "f2bfd025", "metadata": { "tags": [ "solution" @@ -1288,7 +1288,7 @@ }, { "cell_type": "markdown", - "id": "bfda4b7f", + "id": "20af6915", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1300,7 +1300,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a5b2f903", + "id": "4543762f", "metadata": {}, "outputs": [], "source": [ @@ -1313,7 +1313,7 @@ }, { "cell_type": "markdown", - "id": "935009d9", + "id": "d0b8a2ec", "metadata": { "tags": [] }, @@ -1328,7 +1328,7 @@ }, { "cell_type": "markdown", - "id": "7683e70e", + "id": "61e58d7f", "metadata": { "tags": [] }, @@ -1339,7 +1339,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3532d6ed", + "id": "b8dc4640", "metadata": {}, "outputs": [], "source": [ @@ -1353,7 +1353,7 @@ }, { "cell_type": "markdown", - "id": "c8c90e6b", + "id": "ed9b7104", "metadata": { "tags": [] }, @@ -1368,7 +1368,7 @@ }, { "cell_type": "markdown", - "id": "c4c3eff4", + "id": "0add70ab", "metadata": { "lines_to_next_cell": 0 }, @@ -1383,7 +1383,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1eff97c0", + "id": "274d8226", "metadata": {}, "outputs": [], "source": [ @@ -1404,7 +1404,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c1281384", + "id": "b6b54bbe", "metadata": { "title": "Another visualization function" }, @@ -1433,7 +1433,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42cf09ad", + "id": "979ca20f", "metadata": { "lines_to_next_cell": 0 }, @@ 
-1449,7 +1449,7 @@ }, { "cell_type": "markdown", - "id": "30ea4f96", + "id": "b779b535", "metadata": { "lines_to_next_cell": 0 }, @@ -1465,7 +1465,7 @@ }, { "cell_type": "markdown", - "id": "6768c3d0", + "id": "c44f0cf1", "metadata": { "lines_to_next_cell": 0 }, @@ -1480,7 +1480,7 @@ }, { "cell_type": "markdown", - "id": "0287a687", + "id": "f402bb74", "metadata": { "lines_to_next_cell": 0 }, @@ -1494,7 +1494,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6459abb5", + "id": "12542a6d", "metadata": { "lines_to_next_cell": 0 }, @@ -1514,7 +1514,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ca40f5a7", + "id": "8d3e6d86", "metadata": { "lines_to_next_cell": 0 }, @@ -1550,7 +1550,7 @@ }, { "cell_type": "markdown", - "id": "94276a21", + "id": "ca1d1242", "metadata": { "lines_to_next_cell": 0 }, @@ -1564,7 +1564,7 @@ }, { "cell_type": "markdown", - "id": "a7597f89", + "id": "6f530e82", "metadata": { "lines_to_next_cell": 0 }, @@ -1580,7 +1580,7 @@ }, { "cell_type": "markdown", - "id": "9cb53f3f", + "id": "5904b20b", "metadata": { "lines_to_next_cell": 0 }, @@ -1596,7 +1596,7 @@ }, { "cell_type": "markdown", - "id": "6a43a957", + "id": "72ca6d87", "metadata": { "lines_to_next_cell": 0 }, @@ -1619,7 +1619,7 @@ }, { "cell_type": "markdown", - "id": "50e69413", + "id": "a0baa037", "metadata": {}, "source": [ "

    Task 5.1: Explore the style space

    \n", @@ -1631,7 +1631,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c71b768e", + "id": "bc0062db", "metadata": {}, "outputs": [], "source": [ @@ -1666,12 +1666,12 @@ }, { "cell_type": "markdown", - "id": "a4676c24", + "id": "ba428131", "metadata": { "lines_to_next_cell": 0 }, "source": [ - "

    Task 5.1: Adding color to the style space

    \n", + "

    Task 5.2: Adding color to the style space

    \n", "We know that color is important. Does interpreting the style space as colors help us understand better?\n", "\n", "Let's use the style space to color the PCA plot.\n", @@ -1682,7 +1682,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b5838e7c", + "id": "2ee2e061", "metadata": { "lines_to_next_cell": 0 }, @@ -1709,7 +1709,7 @@ }, { "cell_type": "markdown", - "id": "2aff4255", + "id": "a26903b8", "metadata": { "lines_to_next_cell": 0 }, @@ -1723,12 +1723,12 @@ }, { "cell_type": "markdown", - "id": "a80963e3", + "id": "7f72dfb5", "metadata": { "lines_to_next_cell": 0 }, "source": [ - "

    Task 5.2: Using the images to color the style space

    \n", + "

    Task 5.3: Using the images to color the style space

    \n", "Finally, let's just use the colors from the images themselves!\n", "The maximum value in the image (since they are \"black-and-color\") can be used as a color!\n", "\n", @@ -1740,7 +1740,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65e070c3", + "id": "be62a09b", "metadata": {}, "outputs": [], "source": [ @@ -1762,7 +1762,7 @@ }, { "cell_type": "markdown", - "id": "6f5d93bb", + "id": "f16385e4", "metadata": {}, "source": [ "

    Questions

    \n", @@ -1774,7 +1774,7 @@ }, { "cell_type": "markdown", - "id": "1c2bf24d", + "id": "e0f59eee", "metadata": {}, "source": [ "

    Checkpoint 5

    \n", @@ -1792,7 +1792,7 @@ }, { "cell_type": "markdown", - "id": "c5cb1ade", + "id": "0bbc6cd2", "metadata": {}, "source": [ "# Bonus!\n", From b51f0430193091910e56a1aedc7bf5fe8ea2530b Mon Sep 17 00:00:00 2001 From: Larissa Heinrich Date: Mon, 26 Aug 2024 20:55:53 +0000 Subject: [PATCH 10/11] fix rendering for last few cells --- solution.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solution.py b/solution.py index 00e477c..c70861b 100644 --- a/solution.py +++ b/solution.py @@ -1186,11 +1186,11 @@ def visualize_color_attribution_and_counterfactual( #
  • What happens if you add a Sigmoid activation to the output of the style encoder?
  • # See what else you can think of, and see how finnicky training a GAN can be! -## %% [markdown] tags=["solution"] +# %% [markdown] tags=["solution"] # The colors for the classes are sampled from matplotlib colormaps! They are the four seasons: spring, summer, autumn, and winter. # Check your style space again to see if you can see the patterns now! -## %% tags=["solution"] +# %% tags=["solution"] # Let's plot the colormaps import matplotlib as mpl import numpy as np From 4ee7e52b5f54291123999c2219ff52d33cbf004a Mon Sep 17 00:00:00 2001 From: neptunes5thmoon Date: Mon, 26 Aug 2024 20:56:44 +0000 Subject: [PATCH 11/11] Commit from GitHub Actions (Build Notebooks) --- exercise.ipynb | 245 +++++++++++++++++++++---------------------------- solution.ipynb | 238 +++++++++++++++++++++++++---------------------- 2 files changed, 232 insertions(+), 251 deletions(-) diff --git a/exercise.ipynb b/exercise.ipynb index 50d5adb..4d5da75 100644 --- a/exercise.ipynb +++ b/exercise.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "be4e7a97", + "id": "5836c5fd", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "a1e6c3cd", + "id": "c7ee63fa", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "bc9d8f39", + "id": "25948d26", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5a28affe", + "id": "8b8c18b6", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "3fb92e01", + "id": "2d9a1fa2", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "225052d7", + "id": "33bedaea", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "a878ce13", + "id": "d8c857dc", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "1f8d5401", + "id": "04aa9880", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19f50056", + "id": "8d3cf42c", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -155,7 +155,7 @@ }, { "cell_type": "markdown", - "id": "6dd3913c", + "id": "252e596c", "metadata": { "lines_to_next_cell": 0 }, @@ -166,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "060a05f5", + "id": "2bb3f66f", "metadata": { "lines_to_next_cell": 0 }, @@ -195,7 +195,7 @@ }, { "cell_type": "markdown", - "id": "2616d093", + "id": "ad6e8d02", "metadata": { "lines_to_next_cell": 0 }, @@ -212,7 +212,7 @@ }, { "cell_type": "markdown", - "id": "76fae027", + "id": "8ff2253c", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -222,7 +222,7 @@ }, { "cell_type": "markdown", - "id": "5284eaf7", + "id": "137f5e29", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -235,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57eb7e30", + "id": "877b0dc5", "metadata": { "tags": [] }, @@ -253,7 +253,7 @@ }, { "cell_type": "markdown", - "id": "5dd749a7", + "id": "346433e9", "metadata": { "tags": [] }, @@ -269,7 +269,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7d56e7b9", + "id": "2ed39b81", "metadata": { "tags": [ "task" @@ -290,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f1942f36", + "id": "d182aa7a", "metadata": { "tags": [] }, @@ -303,7 +303,7 @@ }, { 
"cell_type": "markdown", - "id": "7e5d1815", + "id": "ef1e380d", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -315,7 +315,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d5103ffb", + "id": "3fce7d60", "metadata": { "tags": [] }, @@ -343,7 +343,7 @@ { "cell_type": "code", "execution_count": null, - "id": "858a7c14", + "id": "d38da2c2", "metadata": { "tags": [] }, @@ -356,7 +356,7 @@ }, { "cell_type": "markdown", - "id": "78c186ef", + "id": "aff26564", "metadata": { "lines_to_next_cell": 2 }, @@ -370,7 +370,7 @@ }, { "cell_type": "markdown", - "id": "5e3aa105", + "id": "53ffc390", "metadata": { "lines_to_next_cell": 0 }, @@ -383,7 +383,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8171ef5a", + "id": "160b27d5", "metadata": {}, "outputs": [], "source": [ @@ -408,7 +408,7 @@ }, { "cell_type": "markdown", - "id": "0b65a750", + "id": "42462cab", "metadata": { "lines_to_next_cell": 0 }, @@ -422,7 +422,7 @@ }, { "cell_type": "markdown", - "id": "06d9fccd", + "id": "b4cd9eac", "metadata": {}, "source": [ "\n", @@ -448,7 +448,7 @@ }, { "cell_type": "markdown", - "id": "1cf3195f", + "id": "dc603cd7", "metadata": {}, "source": [ "

    Task 2.3: Use random noise as a baseline

    \n", @@ -460,7 +460,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fbefdb10", + "id": "9be598b0", "metadata": { "tags": [ "task" @@ -482,7 +482,7 @@ }, { "cell_type": "markdown", - "id": "e31bd3cf", + "id": "52c0979f", "metadata": { "tags": [] }, @@ -496,7 +496,7 @@ { "cell_type": "code", "execution_count": null, - "id": "98de5d82", + "id": "d392024c", "metadata": { "tags": [ "task" @@ -520,7 +520,7 @@ }, { "cell_type": "markdown", - "id": "290bdbf3", + "id": "30b56566", "metadata": { "tags": [] }, @@ -536,7 +536,7 @@ }, { "cell_type": "markdown", - "id": "659c5758", + "id": "ee7c164c", "metadata": {}, "source": [ "

    BONUS Task: Using different attributions.

    \n", @@ -550,7 +550,7 @@ }, { "cell_type": "markdown", - "id": "b93071a2", + "id": "71b2e6fa", "metadata": {}, "source": [ "

    Checkpoint 2

    \n", @@ -570,7 +570,7 @@ }, { "cell_type": "markdown", - "id": "e9f2b7ae", + "id": "e0966908", "metadata": { "lines_to_next_cell": 0 }, @@ -598,7 +598,7 @@ }, { "cell_type": "markdown", - "id": "92223fdc", + "id": "50bfe53d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -621,7 +621,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5a72db0b", + "id": "b1ab5f6d", "metadata": {}, "outputs": [], "source": [ @@ -653,7 +653,7 @@ }, { "cell_type": "markdown", - "id": "01335c40", + "id": "7029ce21", "metadata": { "lines_to_next_cell": 0 }, @@ -668,7 +668,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8d0f0203", + "id": "48bc8945", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -689,7 +689,7 @@ }, { "cell_type": "markdown", - "id": "186f40a5", + "id": "156d8774", "metadata": { "tags": [] }, @@ -704,7 +704,7 @@ }, { "cell_type": "markdown", - "id": "af4cf127", + "id": "48a541a1", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -721,7 +721,7 @@ { "cell_type": "code", "execution_count": null, - "id": "94f07b70", + "id": "433257a6", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -735,7 +735,7 @@ }, { "cell_type": "markdown", - "id": "c2a072a4", + "id": "b16b319e", "metadata": { "lines_to_next_cell": 0 }, @@ -746,7 +746,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f7bb217b", + "id": "1e85b7b2", "metadata": {}, "outputs": [], "source": [ @@ -756,7 +756,7 @@ }, { "cell_type": "markdown", - "id": "dfdd1272", + "id": "dc1c55cf", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -774,7 +774,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7ddd581a", + "id": "33e0ea83", "metadata": { "lines_to_next_cell": 0 }, @@ -786,7 +786,7 @@ }, { "cell_type": "markdown", - "id": "2f7fe43c", + "id": "6bbe594d", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -805,7 +805,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f01095a7", + "id": "7b5f7065", "metadata": {}, "outputs": [], "source": [ @@ -814,7 +814,7 @@ }, { "cell_type": "markdown", - "id": "bad091aa", + "id": "c4886cf1", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -830,7 +830,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e49c55e0", + "id": "1f2d9558", "metadata": {}, "outputs": [], "source": [ @@ -839,7 +839,7 @@ }, { "cell_type": "markdown", - "id": "af990b9a", + "id": "6632b7c6", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -851,7 +851,7 @@ { "cell_type": "code", "execution_count": null, - "id": "291f2062", + "id": "2b336a53", "metadata": {}, "outputs": [], "source": [ @@ -864,7 +864,7 @@ }, { "cell_type": "markdown", - "id": "919aea73", + "id": "b7c49115", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -878,7 +878,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26f2a9c1", + "id": "e076c809", "metadata": {}, "outputs": [], "source": [ @@ -890,7 +890,7 @@ }, { "cell_type": "markdown", - "id": "54780ac3", + "id": "9d7003f8", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -910,7 +910,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e7187b29", + "id": "f3b60479", "metadata": {}, "outputs": [], "source": [ @@ -934,7 +934,7 @@ { "cell_type": "code", "execution_count": null, - "id": "78521f46", + "id": "103b94b0", "metadata": { "lines_to_next_cell": 2 }, @@ -946,7 +946,7 @@ }, { "cell_type": "markdown", - "id": "036e6086", + "id": "d146e4f8", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -968,7 +968,7 @@ }, { "cell_type": "markdown", - "id": "3b756b7a", + "id": "11e0f5dd", "metadata": { 
"lines_to_next_cell": 0, "tags": [] @@ -980,7 +980,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20847a1b", + "id": "79722273", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1091,7 +1091,7 @@ }, { "cell_type": "markdown", - "id": "78403aaa", + "id": "5a0870e3", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1103,7 +1103,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9bb6c797", + "id": "9c9a471f", "metadata": {}, "outputs": [], "source": [ @@ -1119,7 +1119,7 @@ }, { "cell_type": "markdown", - "id": "c32cbd8b", + "id": "f01e0c51", "metadata": { "tags": [] }, @@ -1134,7 +1134,7 @@ }, { "cell_type": "markdown", - "id": "4c86dd42", + "id": "3c545933", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1146,7 +1146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8f728ded", + "id": "74225676", "metadata": {}, "outputs": [], "source": [ @@ -1168,7 +1168,7 @@ }, { "cell_type": "markdown", - "id": "028d3bbc", + "id": "b83dfcd2", "metadata": { "tags": [] }, @@ -1184,7 +1184,7 @@ }, { "cell_type": "markdown", - "id": "b63aac86", + "id": "740be1a4", "metadata": { "tags": [] }, @@ -1194,7 +1194,7 @@ }, { "cell_type": "markdown", - "id": "b0ad5935", + "id": "2a44a05e", "metadata": { "tags": [] }, @@ -1211,7 +1211,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0cf2b9a5", + "id": "8d711fbe", "metadata": { "title": "Loading the test dataset" }, @@ -1231,7 +1231,7 @@ }, { "cell_type": "markdown", - "id": "35cc9b35", + "id": "7c1d56b5", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1243,7 +1243,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4146969c", + "id": "f649373f", "metadata": {}, "outputs": [], "source": [ @@ -1256,7 +1256,7 @@ }, { "cell_type": "markdown", - "id": "f99f676c", + "id": "4a3c2e42", "metadata": { "lines_to_next_cell": 0 }, @@ -1266,7 +1266,7 @@ }, { "cell_type": "markdown", - "id": "a7450339", + "id": "746827ec", "metadata": { "lines_to_next_cell": 0 }, @@ -1284,7 +1284,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58973345", + "id": "855b63d4", "metadata": { "lines_to_next_cell": 0, "tags": [ @@ -1320,7 +1320,7 @@ }, { "cell_type": "markdown", - "id": "20af6915", + "id": "9e21d254", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1332,7 +1332,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4543762f", + "id": "143cb6c6", "metadata": {}, "outputs": [], "source": [ @@ -1345,7 +1345,7 @@ }, { "cell_type": "markdown", - "id": "d0b8a2ec", + "id": "a15cdb80", "metadata": { "tags": [] }, @@ -1360,7 +1360,7 @@ }, { "cell_type": "markdown", - "id": "61e58d7f", + "id": "2d2ced5b", "metadata": { "tags": [] }, @@ -1371,7 +1371,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b8dc4640", + "id": "24639718", "metadata": {}, "outputs": [], "source": [ @@ -1385,7 +1385,7 @@ }, { "cell_type": "markdown", - "id": "ed9b7104", + "id": "8afab4b7", "metadata": { "tags": [] }, @@ -1400,7 +1400,7 @@ }, { "cell_type": "markdown", - "id": "0add70ab", + "id": "927a2361", "metadata": { "lines_to_next_cell": 0 }, @@ -1415,7 +1415,7 @@ { "cell_type": "code", "execution_count": null, - "id": "274d8226", + "id": "87aaa903", "metadata": {}, "outputs": [], "source": [ @@ -1436,7 +1436,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b6b54bbe", + "id": "3633b841", "metadata": { "title": "Another visualization function" }, @@ -1465,7 +1465,7 @@ { "cell_type": "code", "execution_count": null, - "id": "979ca20f", + "id": "32f5d3ba", "metadata": { "lines_to_next_cell": 
0 }, @@ -1481,7 +1481,7 @@ }, { "cell_type": "markdown", - "id": "b779b535", + "id": "89ecd3a9", "metadata": { "lines_to_next_cell": 0 }, @@ -1497,7 +1497,7 @@ }, { "cell_type": "markdown", - "id": "c44f0cf1", + "id": "19612f27", "metadata": { "lines_to_next_cell": 0 }, @@ -1512,7 +1512,7 @@ }, { "cell_type": "markdown", - "id": "f402bb74", + "id": "f63764f2", "metadata": { "lines_to_next_cell": 0 }, @@ -1526,7 +1526,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12542a6d", + "id": "1bcaa03b", "metadata": { "lines_to_next_cell": 0 }, @@ -1546,7 +1546,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8d3e6d86", + "id": "d3b3324a", "metadata": { "lines_to_next_cell": 0 }, @@ -1582,7 +1582,7 @@ }, { "cell_type": "markdown", - "id": "ca1d1242", + "id": "81ea3fdd", "metadata": { "lines_to_next_cell": 0 }, @@ -1596,7 +1596,7 @@ }, { "cell_type": "markdown", - "id": "6f530e82", + "id": "92c0dba0", "metadata": { "lines_to_next_cell": 0 }, @@ -1612,7 +1612,7 @@ }, { "cell_type": "markdown", - "id": "5904b20b", + "id": "93ad40a6", "metadata": { "lines_to_next_cell": 0 }, @@ -1628,7 +1628,7 @@ }, { "cell_type": "markdown", - "id": "72ca6d87", + "id": "884f4f82", "metadata": { "lines_to_next_cell": 0 }, @@ -1651,7 +1651,7 @@ }, { "cell_type": "markdown", - "id": "a0baa037", + "id": "344c70a0", "metadata": {}, "source": [ "
    Task 5.1: Explore the style space
    \n", @@ -1663,7 +1663,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bc0062db", + "id": "bc06e146", "metadata": {}, "outputs": [], "source": [ @@ -1698,7 +1698,7 @@ }, { "cell_type": "markdown", - "id": "ba428131", + "id": "518c6ddb", "metadata": { "lines_to_next_cell": 0 }, @@ -1714,7 +1714,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2ee2e061", + "id": "40fa1f61", "metadata": { "lines_to_next_cell": 0 }, @@ -1741,7 +1741,7 @@ }, { "cell_type": "markdown", - "id": "a26903b8", + "id": "a204c00f", "metadata": { "lines_to_next_cell": 0 }, @@ -1755,7 +1755,7 @@ }, { "cell_type": "markdown", - "id": "7f72dfb5", + "id": "16971e9c", "metadata": { "lines_to_next_cell": 0 }, @@ -1772,7 +1772,7 @@ { "cell_type": "code", "execution_count": null, - "id": "be62a09b", + "id": "5d04746b", "metadata": {}, "outputs": [], "source": [ @@ -1794,7 +1794,7 @@ }, { "cell_type": "markdown", - "id": "f16385e4", + "id": "7dd5245c", "metadata": {}, "source": [ "
    Questions
    \n", @@ -1806,7 +1806,7 @@ }, { "cell_type": "markdown", - "id": "e0f59eee", + "id": "4ab3793f", "metadata": {}, "source": [ "
    Checkpoint 5
    \n", @@ -1824,7 +1824,7 @@ }, { "cell_type": "markdown", - "id": "0bbc6cd2", + "id": "2d4e3c2a", "metadata": {}, "source": [ "# Bonus!\n", @@ -1834,46 +1834,7 @@ "
  • What happens if you don't use the EMA model?\n",
  • What happens if you change the learning rates?\n",
  • What happens if you add a Sigmoid activation to the output of the style encoder?
  • \n", - "See what else you can think of, and see how finnicky training a GAN can be!\n", - "\n", - "# %% [markdown] tags=[\"solution\"]\n", - "The colors for the classes are sampled from matplotlib colormaps! They are the four seasons: spring, summer, autumn, and winter.\n", - "Check your style space again to see if you can see the patterns now!\n", - "\n", - "# %% tags=[\"solution\"]\n", - "Let's plot the colormaps\n", - "import matplotlib as mpl\n", - "import numpy as np\n", - "\n", - "\n", - "def plot_color_gradients(cmap_list):\n", - " gradient = np.linspace(0, 1, 256)\n", - " gradient = np.vstack((gradient, gradient))\n", - "\n", - " # Create figure and adjust figure height to number of colormaps\n", - " nrows = len(cmap_list)\n", - " figh = 0.35 + 0.15 + (nrows + (nrows - 1) * 0.1) * 0.22\n", - " fig, axs = plt.subplots(nrows=nrows + 1, figsize=(6.4, figh))\n", - " fig.subplots_adjust(top=1 - 0.35 / figh, bottom=0.15 / figh, left=0.2, right=0.99)\n", - "\n", - " for ax, name in zip(axs, cmap_list):\n", - " ax.imshow(gradient, aspect=\"auto\", cmap=mpl.colormaps[name])\n", - " ax.text(\n", - " -0.01,\n", - " 0.5,\n", - " name,\n", - " va=\"center\",\n", - " ha=\"right\",\n", - " fontsize=10,\n", - " transform=ax.transAxes,\n", - " )\n", - "\n", - " # Turn off *all* ticks & spines, not just the ones with colormaps.\n", - " for ax in axs:\n", - " ax.set_axis_off()\n", - "\n", - "\n", - "plot_color_gradients([\"spring\", \"summer\", \"autumn\", \"winter\"])" + "See what else you can think of, and see how finnicky training a GAN can be!" ] } ], diff --git a/solution.ipynb b/solution.ipynb index d4808b6..2fbb96f 100644 --- a/solution.ipynb +++ b/solution.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "be4e7a97", + "id": "5836c5fd", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -29,7 +29,7 @@ }, { "cell_type": "markdown", - "id": "a1e6c3cd", + "id": "c7ee63fa", "metadata": { "lines_to_next_cell": 0 }, @@ -41,7 +41,7 @@ }, { "cell_type": "markdown", - "id": "bc9d8f39", + "id": "25948d26", "metadata": {}, "source": [ "\n", @@ -54,7 +54,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5a28affe", + "id": "8b8c18b6", "metadata": { "lines_to_next_cell": 0 }, @@ -68,7 +68,7 @@ }, { "cell_type": "markdown", - "id": "3fb92e01", + "id": "2d9a1fa2", "metadata": { "lines_to_next_cell": 0 }, @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "225052d7", + "id": "33bedaea", "metadata": {}, "outputs": [], "source": [ @@ -102,7 +102,7 @@ }, { "cell_type": "markdown", - "id": "a878ce13", + "id": "d8c857dc", "metadata": { "lines_to_next_cell": 0 }, @@ -113,7 +113,7 @@ }, { "cell_type": "markdown", - "id": "1f8d5401", + "id": "04aa9880", "metadata": { "lines_to_next_cell": 0 }, @@ -130,7 +130,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dc1f9832", + "id": "c15b2288", "metadata": { "tags": [ "solution" @@ -154,7 +154,7 @@ }, { "cell_type": "markdown", - "id": "6dd3913c", + "id": "252e596c", "metadata": { "lines_to_next_cell": 0 }, @@ -165,7 +165,7 @@ { "cell_type": "code", "execution_count": null, - "id": "060a05f5", + "id": "2bb3f66f", "metadata": { "lines_to_next_cell": 0 }, @@ -194,7 +194,7 @@ }, { "cell_type": "markdown", - "id": "2616d093", + "id": "ad6e8d02", "metadata": { "lines_to_next_cell": 0 }, @@ -211,7 +211,7 @@ }, { "cell_type": "markdown", - "id": "76fae027", + "id": "8ff2253c", "metadata": {}, "source": [ "# Part 2: Using Integrated Gradients to find what the classifier knows\n", @@ -221,7 +221,7 @@ }, { 
"cell_type": "markdown", - "id": "5284eaf7", + "id": "137f5e29", "metadata": {}, "source": [ "## Attributions through integrated gradients\n", @@ -234,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "57eb7e30", + "id": "877b0dc5", "metadata": { "tags": [] }, @@ -252,7 +252,7 @@ }, { "cell_type": "markdown", - "id": "5dd749a7", + "id": "346433e9", "metadata": { "tags": [] }, @@ -268,7 +268,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9e77b469", + "id": "e5cd638a", "metadata": { "tags": [ "solution" @@ -292,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f1942f36", + "id": "d182aa7a", "metadata": { "tags": [] }, @@ -305,7 +305,7 @@ }, { "cell_type": "markdown", - "id": "7e5d1815", + "id": "ef1e380d", "metadata": { "lines_to_next_cell": 2, "tags": [] @@ -317,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d5103ffb", + "id": "3fce7d60", "metadata": { "tags": [] }, @@ -345,7 +345,7 @@ { "cell_type": "code", "execution_count": null, - "id": "858a7c14", + "id": "d38da2c2", "metadata": { "tags": [] }, @@ -358,7 +358,7 @@ }, { "cell_type": "markdown", - "id": "78c186ef", + "id": "aff26564", "metadata": { "lines_to_next_cell": 2 }, @@ -372,7 +372,7 @@ }, { "cell_type": "markdown", - "id": "5e3aa105", + "id": "53ffc390", "metadata": { "lines_to_next_cell": 0 }, @@ -385,7 +385,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8171ef5a", + "id": "160b27d5", "metadata": {}, "outputs": [], "source": [ @@ -410,7 +410,7 @@ }, { "cell_type": "markdown", - "id": "0b65a750", + "id": "42462cab", "metadata": { "lines_to_next_cell": 0 }, @@ -424,7 +424,7 @@ }, { "cell_type": "markdown", - "id": "06d9fccd", + "id": "b4cd9eac", "metadata": {}, "source": [ "\n", @@ -450,7 +450,7 @@ }, { "cell_type": "markdown", - "id": "1cf3195f", + "id": "dc603cd7", "metadata": {}, "source": [ "

    Task 2.3: Use random noise as a baseline

    BONUS Task: Using different attributions.

    Checkpoint 2
"lines_to_next_cell": 0, "tags": [] @@ -988,7 +988,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8256f4b5", + "id": "6fefc28d", "metadata": { "lines_to_next_cell": 2, "tags": [ @@ -1058,7 +1058,7 @@ }, { "cell_type": "markdown", - "id": "78403aaa", + "id": "5a0870e3", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1070,7 +1070,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9bb6c797", + "id": "9c9a471f", "metadata": {}, "outputs": [], "source": [ @@ -1086,7 +1086,7 @@ }, { "cell_type": "markdown", - "id": "c32cbd8b", + "id": "f01e0c51", "metadata": { "tags": [] }, @@ -1101,7 +1101,7 @@ }, { "cell_type": "markdown", - "id": "4c86dd42", + "id": "3c545933", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1113,7 +1113,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8f728ded", + "id": "74225676", "metadata": {}, "outputs": [], "source": [ @@ -1135,7 +1135,7 @@ }, { "cell_type": "markdown", - "id": "028d3bbc", + "id": "b83dfcd2", "metadata": { "tags": [] }, @@ -1151,7 +1151,7 @@ }, { "cell_type": "markdown", - "id": "b63aac86", + "id": "740be1a4", "metadata": { "tags": [] }, @@ -1161,7 +1161,7 @@ }, { "cell_type": "markdown", - "id": "b0ad5935", + "id": "2a44a05e", "metadata": { "tags": [] }, @@ -1178,7 +1178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0cf2b9a5", + "id": "8d711fbe", "metadata": { "title": "Loading the test dataset" }, @@ -1198,7 +1198,7 @@ }, { "cell_type": "markdown", - "id": "35cc9b35", + "id": "7c1d56b5", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1210,7 +1210,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4146969c", + "id": "f649373f", "metadata": {}, "outputs": [], "source": [ @@ -1223,7 +1223,7 @@ }, { "cell_type": "markdown", - "id": "f99f676c", + "id": "4a3c2e42", "metadata": { "lines_to_next_cell": 0 }, @@ -1233,7 +1233,7 @@ }, { "cell_type": "markdown", - "id": "a7450339", + "id": "746827ec", "metadata": { "lines_to_next_cell": 0 }, @@ -1251,7 +1251,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f2bfd025", + "id": "870dd94a", "metadata": { "tags": [ "solution" @@ -1288,7 +1288,7 @@ }, { "cell_type": "markdown", - "id": "20af6915", + "id": "9e21d254", "metadata": { "lines_to_next_cell": 0, "tags": [] @@ -1300,7 +1300,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4543762f", + "id": "143cb6c6", "metadata": {}, "outputs": [], "source": [ @@ -1313,7 +1313,7 @@ }, { "cell_type": "markdown", - "id": "d0b8a2ec", + "id": "a15cdb80", "metadata": { "tags": [] }, @@ -1328,7 +1328,7 @@ }, { "cell_type": "markdown", - "id": "61e58d7f", + "id": "2d2ced5b", "metadata": { "tags": [] }, @@ -1339,7 +1339,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b8dc4640", + "id": "24639718", "metadata": {}, "outputs": [], "source": [ @@ -1353,7 +1353,7 @@ }, { "cell_type": "markdown", - "id": "ed9b7104", + "id": "8afab4b7", "metadata": { "tags": [] }, @@ -1368,7 +1368,7 @@ }, { "cell_type": "markdown", - "id": "0add70ab", + "id": "927a2361", "metadata": { "lines_to_next_cell": 0 }, @@ -1383,7 +1383,7 @@ { "cell_type": "code", "execution_count": null, - "id": "274d8226", + "id": "87aaa903", "metadata": {}, "outputs": [], "source": [ @@ -1404,7 +1404,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b6b54bbe", + "id": "3633b841", "metadata": { "title": "Another visualization function" }, @@ -1433,7 +1433,7 @@ { "cell_type": "code", "execution_count": null, - "id": "979ca20f", + "id": "32f5d3ba", "metadata": { "lines_to_next_cell": 0 }, @@ 
-1449,7 +1449,7 @@ }, { "cell_type": "markdown", - "id": "b779b535", + "id": "89ecd3a9", "metadata": { "lines_to_next_cell": 0 }, @@ -1465,7 +1465,7 @@ }, { "cell_type": "markdown", - "id": "c44f0cf1", + "id": "19612f27", "metadata": { "lines_to_next_cell": 0 }, @@ -1480,7 +1480,7 @@ }, { "cell_type": "markdown", - "id": "f402bb74", + "id": "f63764f2", "metadata": { "lines_to_next_cell": 0 }, @@ -1494,7 +1494,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12542a6d", + "id": "1bcaa03b", "metadata": { "lines_to_next_cell": 0 }, @@ -1514,7 +1514,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8d3e6d86", + "id": "d3b3324a", "metadata": { "lines_to_next_cell": 0 }, @@ -1550,7 +1550,7 @@ }, { "cell_type": "markdown", - "id": "ca1d1242", + "id": "81ea3fdd", "metadata": { "lines_to_next_cell": 0 }, @@ -1564,7 +1564,7 @@ }, { "cell_type": "markdown", - "id": "6f530e82", + "id": "92c0dba0", "metadata": { "lines_to_next_cell": 0 }, @@ -1580,7 +1580,7 @@ }, { "cell_type": "markdown", - "id": "5904b20b", + "id": "93ad40a6", "metadata": { "lines_to_next_cell": 0 }, @@ -1596,7 +1596,7 @@ }, { "cell_type": "markdown", - "id": "72ca6d87", + "id": "884f4f82", "metadata": { "lines_to_next_cell": 0 }, @@ -1619,7 +1619,7 @@ }, { "cell_type": "markdown", - "id": "a0baa037", + "id": "344c70a0", "metadata": {}, "source": [ "

    Task 5.1: Explore the style space

    Questions

    Checkpoint 5
  • What happens if you don't use the EMA model?\n",
  • What happens if you change the learning rates?\n",
  • What happens if you add a Sigmoid activation to the output of the style encoder?
  • \n", - "See what else you can think of, and see how finnicky training a GAN can be!\n", - "\n", - "# %% [markdown] tags=[\"solution\"]\n", + "See what else you can think of, and see how finnicky training a GAN can be!" + ] + }, + { + "cell_type": "markdown", + "id": "1cd1cd8b", + "metadata": { + "tags": [ + "solution" + ] + }, + "source": [ "The colors for the classes are sampled from matplotlib colormaps! They are the four seasons: spring, summer, autumn, and winter.\n", - "Check your style space again to see if you can see the patterns now!\n", - "\n", - "# %% tags=[\"solution\"]\n", - "Let's plot the colormaps\n", + "Check your style space again to see if you can see the patterns now!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ef23fc4", + "metadata": { + "tags": [ + "solution" + ] + }, + "outputs": [], + "source": [ + "# Let's plot the colormaps\n", "import matplotlib as mpl\n", "import numpy as np\n", "\n",