From a1362241ec3d3e6d788f5ed5b073addc940e7ea0 Mon Sep 17 00:00:00 2001 From: daniel <1534513+dantp-ai@users.noreply.github.com> Date: Tue, 27 Feb 2024 19:21:02 +0100 Subject: [PATCH 1/3] Split run test modules into individual per-module runs --- .github/workflows/minitorch.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.github/workflows/minitorch.yml b/.github/workflows/minitorch.yml index a03fe01..0843fd8 100644 --- a/.github/workflows/minitorch.yml +++ b/.github/workflows/minitorch.yml @@ -26,30 +26,43 @@ jobs: run: | # stop the build if there are Python syntax errors or undefined names flake8 --ignore "N801, E203, E266, E501, W503, F812, F401, F841, E741, N803, N802, N806" minitorch/ tests/ project/ - - name: Test with pytest + + - name: Test Module 0 run: | echo "Module 0" pytest tests -x -m task0_1 pytest tests -x -m task0_2 pytest tests -x -m task0_3 pytest tests -x -m task0_4 + + - name: Test Module 1 + run: | echo "Module 1" pytest tests -x -m task1_1 pytest tests -x -m task1_2 pytest tests -x -m task1_3 pytest tests -x -m task1_4 + + - name: Test Module 2 + run: | echo "Module 2" pytest tests -x -m task2_1 pytest tests -x -m task2_2 pytest tests -x -m task2_3 pytest tests -x -m task2_4 + + - name: Test Module 3 + run: | echo "Module 3" pytest tests -x -m task3_1 pytest tests -x -m task3_2 pytest tests -x -m task3_3 pytest tests -x -m task3_4 + + - name: Test Module 4 + run: | echo "Module 4" pytest tests -x -m task4_1 pytest tests -x -m task4_2 pytest tests -x -m task4_3 - pytest tests -x -m task4_4 + pytest tests -x -m task4_4 \ No newline at end of file From 330bd6300fa41071d35e7d123d64a93849a4dde5 Mon Sep 17 00:00:00 2001 From: daniel <1534513+dantp-ai@users.noreply.github.com> Date: Thu, 29 Feb 2024 23:47:56 +0100 Subject: [PATCH 2/3] Ignore F401 for now as they depend on things not implemented yet. 
--- minitorch/autodiff.py | 2 +- minitorch/fast_conv.py | 7 +++---- minitorch/fast_ops.py | 10 ++++++---- minitorch/nn.py | 2 +- minitorch/scalar.py | 16 ++++++++-------- minitorch/tensor_ops.py | 13 +++++++------ project/interface/mlprimer.py | 2 +- project/run_manual.py | 1 + project/run_scalar.py | 1 + tests/test_operators.py | 2 +- 10 files changed, 30 insertions(+), 26 deletions(-) diff --git a/minitorch/autodiff.py b/minitorch/autodiff.py index 06496e2..a3b02ac 100644 --- a/minitorch/autodiff.py +++ b/minitorch/autodiff.py @@ -1,5 +1,5 @@ from dataclasses import dataclass -from typing import Any, Iterable, List, Tuple +from typing import Any, Iterable, List, Tuple # noqa: F401 from typing_extensions import Protocol diff --git a/minitorch/fast_conv.py b/minitorch/fast_conv.py index ce4244c..37aa27c 100644 --- a/minitorch/fast_conv.py +++ b/minitorch/fast_conv.py @@ -1,13 +1,12 @@ from typing import Tuple -import numpy as np -from numba import njit, prange +import numpy as np # noqa: F401 +from numba import njit, prange # noqa: F401 from .autodiff import Context from .tensor import Tensor +from .tensor_data import MAX_DIMS, Index # noqa: F401 from .tensor_data import ( - MAX_DIMS, - Index, Shape, Strides, broadcast_index, diff --git a/minitorch/fast_ops.py b/minitorch/fast_ops.py index dc73b86..40147d0 100644 --- a/minitorch/fast_ops.py +++ b/minitorch/fast_ops.py @@ -2,11 +2,12 @@ from typing import TYPE_CHECKING -import numpy as np -from numba import njit, prange +import numpy as np # noqa: F401 +from numba import njit +from numba import prange # noqa: F401 +from .tensor_data import MAX_DIMS # noqa: F401 from .tensor_data import ( - MAX_DIMS, broadcast_index, index_to_position, shape_broadcast, @@ -18,7 +19,8 @@ from typing import Callable, Optional from .tensor import Tensor - from .tensor_data import Index, Shape, Storage, Strides + from .tensor_data import Index # noqa: F401 + from .tensor_data import Shape, Storage, Strides # TIP: Use `NUMBA_DISABLE_JIT=1 pytest tests/ -m task3_1` to run these tests without JIT. 
diff --git a/minitorch/nn.py b/minitorch/nn.py index 92c0c8f..a74e367 100644 --- a/minitorch/nn.py +++ b/minitorch/nn.py @@ -4,7 +4,7 @@ from .autodiff import Context from .fast_ops import FastOps from .tensor import Tensor -from .tensor_functions import Function, rand, tensor +from .tensor_functions import Function, rand, tensor # noqa: F401 def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]: diff --git a/minitorch/scalar.py b/minitorch/scalar.py index 942079d..449ca51 100644 --- a/minitorch/scalar.py +++ b/minitorch/scalar.py @@ -6,18 +6,18 @@ import numpy as np from .autodiff import Context, Variable, backpropagate, central_difference +from .scalar_functions import EQ # noqa: F401 +from .scalar_functions import LT # noqa: F401 +from .scalar_functions import Add # noqa: F401 +from .scalar_functions import Exp # noqa: F401 +from .scalar_functions import Log # noqa: F401 +from .scalar_functions import Neg # noqa: F401 +from .scalar_functions import ReLU # noqa: F401 +from .scalar_functions import Sigmoid # noqa: F401 from .scalar_functions import ( - EQ, - LT, - Add, - Exp, Inv, - Log, Mul, - Neg, - ReLU, ScalarFunction, - Sigmoid, ) ScalarLike = Union[float, int, "Scalar"] diff --git a/minitorch/tensor_ops.py b/minitorch/tensor_ops.py index db82d54..29d92a3 100644 --- a/minitorch/tensor_ops.py +++ b/minitorch/tensor_ops.py @@ -2,21 +2,22 @@ from typing import TYPE_CHECKING, Callable, Optional, Type -import numpy as np +import numpy as np # noqa: F401 from typing_extensions import Protocol from . import operators +from .tensor_data import MAX_DIMS # noqa: F401 +from .tensor_data import broadcast_index # noqa: F401 +from .tensor_data import index_to_position # noqa: F401 +from .tensor_data import to_index # noqa: F401 from .tensor_data import ( - MAX_DIMS, - broadcast_index, - index_to_position, shape_broadcast, - to_index, ) if TYPE_CHECKING: from .tensor import Tensor - from .tensor_data import Index, Shape, Storage, Strides + from .tensor_data import Index # noqa: F401 + from .tensor_data import Shape, Storage, Strides class MapProto(Protocol): diff --git a/project/interface/mlprimer.py b/project/interface/mlprimer.py index 1d98deb..0de73d5 100644 --- a/project/interface/mlprimer.py +++ b/project/interface/mlprimer.py @@ -1,11 +1,11 @@ import random import chalk as ch +from chalk import path # noqa: F401 from chalk import ( Trail, empty, make_path, - path, place_on_path, rectangle, unit_x, diff --git a/project/run_manual.py b/project/run_manual.py index 302846f..d14c780 100644 --- a/project/run_manual.py +++ b/project/run_manual.py @@ -2,6 +2,7 @@ Be sure you have minitorch installed in you Virtual Env. >>> pip install -Ue . """ + import random import minitorch diff --git a/project/run_scalar.py b/project/run_scalar.py index 4b7ee22..cad6511 100644 --- a/project/run_scalar.py +++ b/project/run_scalar.py @@ -2,6 +2,7 @@ Be sure you have minitorch installed in you Virtual Env. >>> pip install -Ue . 
""" + import random import minitorch diff --git a/tests/test_operators.py b/tests/test_operators.py index 1069423..a279197 100644 --- a/tests/test_operators.py +++ b/tests/test_operators.py @@ -5,6 +5,7 @@ from hypothesis.strategies import lists from minitorch import MathTest +from minitorch.operators import sigmoid # noqa: F401 from minitorch.operators import ( add, addLists, @@ -21,7 +22,6 @@ prod, relu, relu_back, - sigmoid, sum, ) From 246f23105542d6c7b723a47f20c828e9412b7fb6 Mon Sep 17 00:00:00 2001 From: daniel <1534513+dantp-ai@users.noreply.github.com> Date: Thu, 29 Feb 2024 23:49:12 +0100 Subject: [PATCH 3/3] Format with black and ignore F841 for now --- minitorch/autodiff.py | 6 +++--- minitorch/cuda_ops.py | 12 ++++++------ minitorch/fast_conv.py | 12 ++++++------ minitorch/fast_ops.py | 8 ++++---- minitorch/module.py | 8 ++++---- minitorch/nn.py | 16 ++++++++-------- minitorch/scalar.py | 22 +++++++++++----------- minitorch/scalar_functions.py | 32 ++++++++++++++++---------------- minitorch/tensor_data.py | 10 +++++----- minitorch/tensor_functions.py | 34 +++++++++++++++++----------------- minitorch/tensor_ops.py | 9 ++++----- project/run_sentiment.py | 2 +- tests/test_nn.py | 2 +- tests/test_operators.py | 12 ++++++------ 14 files changed, 92 insertions(+), 93 deletions(-) diff --git a/minitorch/autodiff.py b/minitorch/autodiff.py index a3b02ac..9431908 100644 --- a/minitorch/autodiff.py +++ b/minitorch/autodiff.py @@ -23,7 +23,7 @@ def central_difference(f: Any, *vals: Any, arg: int = 0, epsilon: float = 1e-6) An approximation of $f'_i(x_0, \ldots, x_{n-1})$ """ # TODO: Implement for Task 1.1. - raise NotImplementedError('Need to implement for Task 1.1') + raise NotImplementedError("Need to implement for Task 1.1") variable_count = 1 @@ -62,7 +62,7 @@ def topological_sort(variable: Variable) -> Iterable[Variable]: Non-constant Variables in topological order starting from the right. """ # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") def backpropagate(variable: Variable, deriv: Any) -> None: @@ -77,7 +77,7 @@ def backpropagate(variable: Variable, deriv: Any) -> None: No return. Should write to its results to the derivative values of each leaf through `accumulate_derivative`. """ # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") @dataclass diff --git a/minitorch/cuda_ops.py b/minitorch/cuda_ops.py index ac4cbae..22c2124 100644 --- a/minitorch/cuda_ops.py +++ b/minitorch/cuda_ops.py @@ -154,7 +154,7 @@ def _map( in_index = cuda.local.array(MAX_DIMS, numba.int32) i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x # TODO: Implement for Task 3.3. - raise NotImplementedError('Need to implement for Task 3.3') + raise NotImplementedError("Need to implement for Task 3.3") return cuda.jit()(_map) # type: ignore @@ -196,7 +196,7 @@ def _zip( i = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x # TODO: Implement for Task 3.3. - raise NotImplementedError('Need to implement for Task 3.3') + raise NotImplementedError("Need to implement for Task 3.3") return cuda.jit()(_zip) # type: ignore @@ -229,7 +229,7 @@ def _sum_practice(out: Storage, a: Storage, size: int) -> None: pos = cuda.threadIdx.x # TODO: Implement for Task 3.3. 
- raise NotImplementedError('Need to implement for Task 3.3') + raise NotImplementedError("Need to implement for Task 3.3") jit_sum_practice = cuda.jit()(_sum_practice) @@ -279,7 +279,7 @@ def _reduce( pos = cuda.threadIdx.x # TODO: Implement for Task 3.3. - raise NotImplementedError('Need to implement for Task 3.3') + raise NotImplementedError("Need to implement for Task 3.3") return cuda.jit()(_reduce) # type: ignore @@ -316,7 +316,7 @@ def _mm_practice(out: Storage, a: Storage, b: Storage, size: int) -> None: """ BLOCK_DIM = 32 # TODO: Implement for Task 3.3. - raise NotImplementedError('Need to implement for Task 3.3') + raise NotImplementedError("Need to implement for Task 3.3") jit_mm_practice = cuda.jit()(_mm_practice) @@ -386,7 +386,7 @@ def _tensor_matrix_multiply( # b) Copy into shared memory for b matrix # c) Compute the dot produce for position c[i, j] # TODO: Implement for Task 3.4. - raise NotImplementedError('Need to implement for Task 3.4') + raise NotImplementedError("Need to implement for Task 3.4") tensor_matrix_multiply = cuda.jit(_tensor_matrix_multiply) diff --git a/minitorch/fast_conv.py b/minitorch/fast_conv.py index 37aa27c..f9dff39 100644 --- a/minitorch/fast_conv.py +++ b/minitorch/fast_conv.py @@ -76,11 +76,11 @@ def _tensor_conv1d( and in_channels == in_channels_ and out_channels == out_channels_ ) - s1 = input_strides - s2 = weight_strides + s1 = input_strides # noqa: F841 + s2 = weight_strides # noqa: F841 # TODO: Implement for Task 4.1. - raise NotImplementedError('Need to implement for Task 4.1') + raise NotImplementedError("Need to implement for Task 4.1") tensor_conv1d = njit(parallel=True)(_tensor_conv1d) @@ -202,11 +202,11 @@ def _tensor_conv2d( s1 = input_strides s2 = weight_strides # inners - s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3] - s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3] + s10, s11, s12, s13 = s1[0], s1[1], s1[2], s1[3] # noqa: F841 + s20, s21, s22, s23 = s2[0], s2[1], s2[2], s2[3] # noqa: F841 # TODO: Implement for Task 4.2. - raise NotImplementedError('Need to implement for Task 4.2') + raise NotImplementedError("Need to implement for Task 4.2") tensor_conv2d = njit(parallel=True, fastmath=True)(_tensor_conv2d) diff --git a/minitorch/fast_ops.py b/minitorch/fast_ops.py index 40147d0..924e136 100644 --- a/minitorch/fast_ops.py +++ b/minitorch/fast_ops.py @@ -162,7 +162,7 @@ def _map( in_strides: Strides, ) -> None: # TODO: Implement for Task 3.1. - raise NotImplementedError('Need to implement for Task 3.1') + raise NotImplementedError("Need to implement for Task 3.1") return njit(parallel=True)(_map) # type: ignore @@ -201,7 +201,7 @@ def _zip( b_strides: Strides, ) -> None: # TODO: Implement for Task 3.1. - raise NotImplementedError('Need to implement for Task 3.1') + raise NotImplementedError("Need to implement for Task 3.1") return njit(parallel=True)(_zip) # type: ignore @@ -235,7 +235,7 @@ def _reduce( reduce_dim: int, ) -> None: # TODO: Implement for Task 3.1. - raise NotImplementedError('Need to implement for Task 3.1') + raise NotImplementedError("Need to implement for Task 3.1") return njit(parallel=True)(_reduce) # type: ignore @@ -285,7 +285,7 @@ def _tensor_matrix_multiply( b_batch_stride = b_strides[0] if b_shape[0] > 1 else 0 # TODO: Implement for Task 3.2. 
- raise NotImplementedError('Need to implement for Task 3.2') + raise NotImplementedError("Need to implement for Task 3.2") tensor_matrix_multiply = njit(parallel=True, fastmath=True)(_tensor_matrix_multiply) diff --git a/minitorch/module.py b/minitorch/module.py index d32c609..8f17cfb 100644 --- a/minitorch/module.py +++ b/minitorch/module.py @@ -32,12 +32,12 @@ def modules(self) -> Sequence[Module]: def train(self) -> None: "Set the mode of this module and all descendent modules to `train`." # TODO: Implement for Task 0.4. - raise NotImplementedError('Need to implement for Task 0.4') + raise NotImplementedError("Need to implement for Task 0.4") def eval(self) -> None: "Set the mode of this module and all descendent modules to `eval`." # TODO: Implement for Task 0.4. - raise NotImplementedError('Need to implement for Task 0.4') + raise NotImplementedError("Need to implement for Task 0.4") def named_parameters(self) -> Sequence[Tuple[str, Parameter]]: """ @@ -48,12 +48,12 @@ def named_parameters(self) -> Sequence[Tuple[str, Parameter]]: The name and `Parameter` of each ancestor parameter. """ # TODO: Implement for Task 0.4. - raise NotImplementedError('Need to implement for Task 0.4') + raise NotImplementedError("Need to implement for Task 0.4") def parameters(self) -> Sequence[Parameter]: "Enumerate over all the parameters of this module and its descendents." # TODO: Implement for Task 0.4. - raise NotImplementedError('Need to implement for Task 0.4') + raise NotImplementedError("Need to implement for Task 0.4") def add_parameter(self, k: str, v: Any) -> Parameter: """ diff --git a/minitorch/nn.py b/minitorch/nn.py index a74e367..e39e4fb 100644 --- a/minitorch/nn.py +++ b/minitorch/nn.py @@ -24,7 +24,7 @@ def tile(input: Tensor, kernel: Tuple[int, int]) -> Tuple[Tensor, int, int]: assert height % kh == 0 assert width % kw == 0 # TODO: Implement for Task 4.3. - raise NotImplementedError('Need to implement for Task 4.3') + raise NotImplementedError("Need to implement for Task 4.3") def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor: @@ -40,7 +40,7 @@ def avgpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor: """ batch, channel, height, width = input.shape # TODO: Implement for Task 4.3. - raise NotImplementedError('Need to implement for Task 4.3') + raise NotImplementedError("Need to implement for Task 4.3") max_reduce = FastOps.reduce(operators.max, -1e9) @@ -68,13 +68,13 @@ class Max(Function): def forward(ctx: Context, input: Tensor, dim: Tensor) -> Tensor: "Forward of max should be max reduction" # TODO: Implement for Task 4.4. - raise NotImplementedError('Need to implement for Task 4.4') + raise NotImplementedError("Need to implement for Task 4.4") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]: "Backward of max should be argmax (see above)" # TODO: Implement for Task 4.4. - raise NotImplementedError('Need to implement for Task 4.4') + raise NotImplementedError("Need to implement for Task 4.4") def max(input: Tensor, dim: int) -> Tensor: @@ -97,7 +97,7 @@ def softmax(input: Tensor, dim: int) -> Tensor: softmax tensor """ # TODO: Implement for Task 4.4. - raise NotImplementedError('Need to implement for Task 4.4') + raise NotImplementedError("Need to implement for Task 4.4") def logsoftmax(input: Tensor, dim: int) -> Tensor: @@ -116,7 +116,7 @@ def logsoftmax(input: Tensor, dim: int) -> Tensor: log of softmax tensor """ # TODO: Implement for Task 4.4. 
- raise NotImplementedError('Need to implement for Task 4.4') + raise NotImplementedError("Need to implement for Task 4.4") def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor: @@ -132,7 +132,7 @@ def maxpool2d(input: Tensor, kernel: Tuple[int, int]) -> Tensor: """ batch, channel, height, width = input.shape # TODO: Implement for Task 4.4. - raise NotImplementedError('Need to implement for Task 4.4') + raise NotImplementedError("Need to implement for Task 4.4") def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor: @@ -148,4 +148,4 @@ def dropout(input: Tensor, rate: float, ignore: bool = False) -> Tensor: tensor with random positions dropped out """ # TODO: Implement for Task 4.4. - raise NotImplementedError('Need to implement for Task 4.4') + raise NotImplementedError("Need to implement for Task 4.4") diff --git a/minitorch/scalar.py b/minitorch/scalar.py index 449ca51..aa36ddb 100644 --- a/minitorch/scalar.py +++ b/minitorch/scalar.py @@ -93,30 +93,30 @@ def __rtruediv__(self, b: ScalarLike) -> Scalar: def __add__(self, b: ScalarLike) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def __bool__(self) -> bool: return bool(self.data) def __lt__(self, b: ScalarLike) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def __gt__(self, b: ScalarLike) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def __eq__(self, b: ScalarLike) -> Scalar: # type: ignore[override] # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def __sub__(self, b: ScalarLike) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def __neg__(self) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def __radd__(self, b: ScalarLike) -> Scalar: return self + b @@ -126,19 +126,19 @@ def __rmul__(self, b: ScalarLike) -> Scalar: def log(self) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def exp(self) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def sigmoid(self) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") def relu(self) -> Scalar: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") # Variable elements for backprop @@ -174,7 +174,7 @@ def chain_rule(self, d_output: Any) -> Iterable[Tuple[Variable, Any]]: assert h.ctx is not None # TODO: Implement for Task 1.3. 
- raise NotImplementedError('Need to implement for Task 1.3') + raise NotImplementedError("Need to implement for Task 1.3") def backward(self, d_output: Optional[float] = None) -> None: """ diff --git a/minitorch/scalar_functions.py b/minitorch/scalar_functions.py index b5deab0..d8d2307 100644 --- a/minitorch/scalar_functions.py +++ b/minitorch/scalar_functions.py @@ -104,12 +104,12 @@ class Mul(ScalarFunction): @staticmethod def forward(ctx: Context, a: float, b: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> Tuple[float, float]: # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") class Inv(ScalarFunction): @@ -118,12 +118,12 @@ class Inv(ScalarFunction): @staticmethod def forward(ctx: Context, a: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> float: # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") class Neg(ScalarFunction): @@ -132,12 +132,12 @@ class Neg(ScalarFunction): @staticmethod def forward(ctx: Context, a: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> float: # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") class Sigmoid(ScalarFunction): @@ -146,12 +146,12 @@ class Sigmoid(ScalarFunction): @staticmethod def forward(ctx: Context, a: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> float: # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") class ReLU(ScalarFunction): @@ -160,12 +160,12 @@ class ReLU(ScalarFunction): @staticmethod def forward(ctx: Context, a: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> float: # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") class Exp(ScalarFunction): @@ -174,12 +174,12 @@ class Exp(ScalarFunction): @staticmethod def forward(ctx: Context, a: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> float: # TODO: Implement for Task 1.4. 
- raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") class LT(ScalarFunction): @@ -188,12 +188,12 @@ class LT(ScalarFunction): @staticmethod def forward(ctx: Context, a: float, b: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> Tuple[float, float]: # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") class EQ(ScalarFunction): @@ -202,9 +202,9 @@ class EQ(ScalarFunction): @staticmethod def forward(ctx: Context, a: float, b: float) -> float: # TODO: Implement for Task 1.2. - raise NotImplementedError('Need to implement for Task 1.2') + raise NotImplementedError("Need to implement for Task 1.2") @staticmethod def backward(ctx: Context, d_output: float) -> Tuple[float, float]: # TODO: Implement for Task 1.4. - raise NotImplementedError('Need to implement for Task 1.4') + raise NotImplementedError("Need to implement for Task 1.4") diff --git a/minitorch/tensor_data.py b/minitorch/tensor_data.py index 1d4a0c9..7dc0fc1 100644 --- a/minitorch/tensor_data.py +++ b/minitorch/tensor_data.py @@ -44,7 +44,7 @@ def index_to_position(index: Index, strides: Strides) -> int: """ # TODO: Implement for Task 2.1. - raise NotImplementedError('Need to implement for Task 2.1') + raise NotImplementedError("Need to implement for Task 2.1") def to_index(ordinal: int, shape: Shape, out_index: OutIndex) -> None: @@ -61,7 +61,7 @@ def to_index(ordinal: int, shape: Shape, out_index: OutIndex) -> None: """ # TODO: Implement for Task 2.1. - raise NotImplementedError('Need to implement for Task 2.1') + raise NotImplementedError("Need to implement for Task 2.1") def broadcast_index( @@ -84,7 +84,7 @@ def broadcast_index( None """ # TODO: Implement for Task 2.2. - raise NotImplementedError('Need to implement for Task 2.2') + raise NotImplementedError("Need to implement for Task 2.2") def shape_broadcast(shape1: UserShape, shape2: UserShape) -> UserShape: @@ -102,7 +102,7 @@ def shape_broadcast(shape1: UserShape, shape2: UserShape) -> UserShape: IndexingError : if cannot broadcast """ # TODO: Implement for Task 2.2. - raise NotImplementedError('Need to implement for Task 2.2') + raise NotImplementedError("Need to implement for Task 2.2") def strides_from_shape(shape: UserShape) -> UserStrides: @@ -228,7 +228,7 @@ def permute(self, *order: int) -> TensorData: ), f"Must give a position to each dimension. Shape: {self.shape} Order: {order}" # TODO: Implement for Task 2.1. - raise NotImplementedError('Need to implement for Task 2.1') + raise NotImplementedError("Need to implement for Task 2.1") def to_string(self) -> str: s = "" diff --git a/minitorch/tensor_functions.py b/minitorch/tensor_functions.py index f1c0547..86db01a 100644 --- a/minitorch/tensor_functions.py +++ b/minitorch/tensor_functions.py @@ -100,60 +100,60 @@ class Mul(Function): @staticmethod def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]: # TODO: Implement for Task 2.4. 
- raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class Sigmoid(Function): @staticmethod def forward(ctx: Context, t1: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tensor: # TODO: Implement for Task 2.4. - raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class ReLU(Function): @staticmethod def forward(ctx: Context, t1: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tensor: # TODO: Implement for Task 2.4. - raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class Log(Function): @staticmethod def forward(ctx: Context, t1: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tensor: # TODO: Implement for Task 2.4. - raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class Exp(Function): @staticmethod def forward(ctx: Context, t1: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tensor: # TODO: Implement for Task 2.4. - raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class Sum(Function): @@ -181,43 +181,43 @@ class LT(Function): @staticmethod def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]: # TODO: Implement for Task 2.4. - raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class EQ(Function): @staticmethod def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, Tensor]: # TODO: Implement for Task 2.4. - raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class IsClose(Function): @staticmethod def forward(ctx: Context, a: Tensor, b: Tensor) -> Tensor: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") class Permute(Function): @staticmethod def forward(ctx: Context, a: Tensor, order: Tensor) -> Tensor: # TODO: Implement for Task 2.3. 
- raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") @staticmethod def backward(ctx: Context, grad_output: Tensor) -> Tuple[Tensor, float]: # TODO: Implement for Task 2.4. - raise NotImplementedError('Need to implement for Task 2.4') + raise NotImplementedError("Need to implement for Task 2.4") class View(Function): diff --git a/minitorch/tensor_ops.py b/minitorch/tensor_ops.py index 29d92a3..aa7dc1c 100644 --- a/minitorch/tensor_ops.py +++ b/minitorch/tensor_ops.py @@ -21,8 +21,7 @@ class MapProto(Protocol): - def __call__(self, x: Tensor, out: Optional[Tensor] = ..., /) -> Tensor: - ... + def __call__(self, x: Tensor, out: Optional[Tensor] = ..., /) -> Tensor: ... class TensorOps: @@ -266,7 +265,7 @@ def _map( in_strides: Strides, ) -> None: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") return _map @@ -311,7 +310,7 @@ def _zip( b_strides: Strides, ) -> None: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") return _zip @@ -342,7 +341,7 @@ def _reduce( reduce_dim: int, ) -> None: # TODO: Implement for Task 2.3. - raise NotImplementedError('Need to implement for Task 2.3') + raise NotImplementedError("Need to implement for Task 2.3") return _reduce diff --git a/project/run_sentiment.py b/project/run_sentiment.py index 4da963d..8202062 100644 --- a/project/run_sentiment.py +++ b/project/run_sentiment.py @@ -105,7 +105,7 @@ def get_predictions_array(y_true, model_output): def get_accuracy(predictions_array): correct = 0 - for (y_true, y_pred, logit) in predictions_array: + for y_true, y_pred, logit in predictions_array: if y_true == y_pred: correct += 1 return correct / len(predictions_array) diff --git a/tests/test_nn.py b/tests/test_nn.py index 23f9b3e..9c2efe9 100644 --- a/tests/test_nn.py +++ b/tests/test_nn.py @@ -32,7 +32,7 @@ def test_avg(t: Tensor) -> None: @given(tensors(shape=(2, 3, 4))) def test_max(t: Tensor) -> None: # TODO: Implement for Task 4.4. - raise NotImplementedError('Need to implement for Task 4.4') + raise NotImplementedError("Need to implement for Task 4.4") @pytest.mark.task4_4 diff --git a/tests/test_operators.py b/tests/test_operators.py index a279197..8e9886c 100644 --- a/tests/test_operators.py +++ b/tests/test_operators.py @@ -108,7 +108,7 @@ def test_sigmoid(a: float) -> None: * It is strictly increasing. """ # TODO: Implement for Task 0.2. - raise NotImplementedError('Need to implement for Task 0.2') + raise NotImplementedError("Need to implement for Task 0.2") @pytest.mark.task0_2 @@ -116,7 +116,7 @@ def test_sigmoid(a: float) -> None: def test_transitive(a: float, b: float, c: float) -> None: "Test the transitive property of less-than (a < b and b < c implies a < c)" # TODO: Implement for Task 0.2. - raise NotImplementedError('Need to implement for Task 0.2') + raise NotImplementedError("Need to implement for Task 0.2") @pytest.mark.task0_2 @@ -126,7 +126,7 @@ def test_symmetric() -> None: gives the same value regardless of the order of its input. """ # TODO: Implement for Task 0.2. - raise NotImplementedError('Need to implement for Task 0.2') + raise NotImplementedError("Need to implement for Task 0.2") @pytest.mark.task0_2 @@ -136,7 +136,7 @@ def test_distribute() -> None: :math:`z \times (x + y) = z \times x + z \times y` """ # TODO: Implement for Task 0.2. 
- raise NotImplementedError('Need to implement for Task 0.2') + raise NotImplementedError("Need to implement for Task 0.2") @pytest.mark.task0_2 @@ -145,7 +145,7 @@ def test_other() -> None: Write a test that ensures some other property holds for your functions. """ # TODO: Implement for Task 0.2. - raise NotImplementedError('Need to implement for Task 0.2') + raise NotImplementedError("Need to implement for Task 0.2") # ## Task 0.3 - Higher-order functions @@ -174,7 +174,7 @@ def test_sum_distribute(ls1: List[float], ls2: List[float]) -> None: is the same as the sum of each element of `ls1` plus each element of `ls2`. """ # TODO: Implement for Task 0.3. - raise NotImplementedError('Need to implement for Task 0.3') + raise NotImplementedError("Need to implement for Task 0.3") @pytest.mark.task0_3
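
Notes on the series:

PATCH 1/3 replaces the single "Test with pytest" step with one workflow step per module, so each module appears as its own named step in the Actions log. The same per-module runs can be reproduced locally with the markers used in the workflow, for example (after `pip install -Ue .`, as noted in project/run_manual.py):

    pytest tests -x -m task0_1
    NUMBA_DISABLE_JIT=1 pytest tests/ -m task3_1    # Module 3 without JIT, per the tip in fast_ops.py

PATCH 2/3 silences flake8 F401 ("module imported but unused") with per-line `# noqa: F401` comments, since the affected imports are only unused until the task code that needs them is implemented. PATCH 3/3 does the same for F841 ("local variable is assigned to but never used") on the stride aliases in fast_conv.py, and reformats the touched files with black, which among other things normalizes the single-quoted NotImplementedError messages to double quotes.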