Skip to content

Commit

Permalink
Merge pull request #487 from gizatechxyz/refactor-tensor-and
Browse files Browse the repository at this point in the history
Refactoring: Tensor AND
  • Loading branch information
raphaelDkhn authored Dec 1, 2023
2 parents 6bcbb99 + 26eb00e commit daab398
Show file tree
Hide file tree
Showing 55 changed files with 98 additions and 1,254 deletions.
43 changes: 10 additions & 33 deletions docs/framework/operators/tensor/tensor.and.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#tensor.and

```rust
fn and(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<usize>;
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool>;
```

Computes the logical AND of two tensors element-wise.
Expand All @@ -11,57 +11,34 @@ The input tensors must have either:

## Args

* `self`(`@Tensor<T>`) - The first tensor to be compared
* `other`(`@Tensor<T>`) - The second tensor to be compared
* `self`(`@Tensor<bool>`) - The first tensor to be compared
* `other`(`@Tensor<bool>`) - The second tensor to be compared

## Panics

* Panics if the shapes are not equal or broadcastable

## Returns

A new `Tensor<usize>` of booleans (0 or 1) with the same shape as the broadcasted inputs.
A new `Tensor<bool>` with the same shape as the broadcasted inputs.

## Examples

Case 1: Compare tensors with same shape

```rust
use array::{ArrayTrait, SpanTrait};

use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};

fn and_example() -> Tensor<usize> {
let tensor_1 = TensorTrait::<u32>::new(
shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
);

let tensor_2 = TensorTrait::<u32>::new(
shape: array![3, 3].span(), data: array![0, 1, 2, 0, 1, 2, 0, 1, 2].span(),
);

return tensor_1.and(@tensor_2);
}
>>> [0,1,1,0,1,1,0,1,1]
```

Case 2: Compare tensors with different shapes

```rust
use array::{ArrayTrait, SpanTrait};

use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};

fn and_example() -> Tensor<usize> {
let tensor_1 = TensorTrait::<u32>::new(
shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
fn and_example() -> Tensor<bool> {
let tensor_1 = TensorTrait::<bool>::new(
shape: array![3, 4].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(),
);

let tensor_2 = TensorTrait::<u32>::new(
shape: array![1, 3].span(), data: array![0, 1, 2].span(),
let tensor_2 = TensorTrait::<bool>::new(
shape: array![3, 4].span(), data: array![false, false, true, true, false, true, false, true, false, true, false, true].span(),
);

return tensor_1.and(@tensor_2);
}
>>> [0,1,1,0,1,1,0,1,1]
>>> [false, false, false, false, false, true, false, false, false, false, false, true]
```
146 changes: 11 additions & 135 deletions nodegen/node/and.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,153 +5,29 @@

class And(RunAll):
@staticmethod
def and_u32():
def and_bool():
def default():
x = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
y = np.random.randint(0, 6, (3, 3, 3)).astype(np.uint32)
x = (np.random.randn(3, 4) > 0).astype(bool)
y = (np.random.randn(3, 4) > 0).astype(bool)
z = np.logical_and(x, y)

x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
x = Tensor(Dtype.BOOL, x.shape, x.flatten())
y = Tensor(Dtype.BOOL, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_u32"
name = "and_bool"
make_test([x, y], z, "input_0.and(@input_1)", name)

def broadcast():
x = np.random.randint(0, 6, (2, 2)).astype(np.uint32)
y = np.random.randint(0, 6, (1, 2)).astype(np.uint32)
x = (np.random.randn(3, 4, 5) > 0).astype(bool)
y = (np.random.randn(4, 5) > 0).astype(bool)
z = np.logical_and(x, y)

x = Tensor(Dtype.U32, x.shape, x.flatten())
y = Tensor(Dtype.U32, y.shape, y.flatten())
x = Tensor(Dtype.BOOL, x.shape, x.flatten())
y = Tensor(Dtype.BOOL, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_u32_broadcast"
make_test([x, y], z, "input_0.and(@input_1)", name)

default()
broadcast()

@staticmethod
def and_i32():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int32)
z = np.logical_and(x, y)

x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_i32"
make_test([x, y], z, "input_0.and(@input_1)", name)

def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int32)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int32)
z = np.logical_and(x, y)

x = Tensor(Dtype.I32, x.shape, x.flatten())
y = Tensor(Dtype.I32, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_i32_broadcast"
make_test([x, y], z, "input_0.and(@input_1)", name)

default()
broadcast()

@staticmethod
def and_i8():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.int8)
z = np.logical_and(x, y)

x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_i8"
make_test([x, y], z, "input_0.and(@input_1)", name)

def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.int8)
y = np.random.randint(-3, 3, (1, 2)).astype(np.int8)
z = np.logical_and(x, y)

x = Tensor(Dtype.I8, x.shape, x.flatten())
y = Tensor(Dtype.I8, y.shape, y.flatten())
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_i8_broadcast"
make_test([x, y], z, "input_0.and(@input_1)", name)

default()
broadcast()

@staticmethod
def and_fp8x23():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.logical_and(x, y)

x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_fp8x23"
make_test([x, y], z, "input_0.and(@input_1)", name)

def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.logical_and(x, y)

x = Tensor(Dtype.FP8x23, x.shape, to_fp(
x.flatten(), FixedImpl.FP8x23))
y = Tensor(Dtype.FP8x23, y.shape, to_fp(
y.flatten(), FixedImpl.FP8x23))
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_fp8x23_broadcast"
make_test([x, y], z, "input_0.and(@input_1)", name)

default()
broadcast()

@staticmethod
def and_fp16x16():
def default():
x = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
y = np.random.randint(-3, 3, (3, 3, 3)).astype(np.float64)
z = np.logical_and(x, y)

x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_fp16x16"
make_test([x, y], z, "input_0.and(@input_1)", name)

def broadcast():
x = np.random.randint(-3, 3, (2, 2)).astype(np.float64)
y = np.random.randint(-3, 3, (1, 2)).astype(np.float64)
z = np.logical_and(x, y)

x = Tensor(Dtype.FP16x16, x.shape, to_fp(
x.flatten(), FixedImpl.FP16x16))
y = Tensor(Dtype.FP16x16, y.shape, to_fp(
y.flatten(), FixedImpl.FP16x16))
z = Tensor(Dtype.BOOL, z.shape, z.flatten())

name = "and_fp16x16_broadcast"
name = "and_bool_broadcast"
make_test([x, y], z, "input_0.and(@input_1)", name)

default()
Expand Down
41 changes: 9 additions & 32 deletions src/operators/tensor/core.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -3452,7 +3452,7 @@ trait TensorTrait<T> {
/// #tensor.and
///
/// ```rust
/// fn and(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<bool>;
/// fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool>;
/// ```
///
/// Computes the logical AND of two tensors element-wise.
Expand All @@ -3462,8 +3462,8 @@ trait TensorTrait<T> {
///
/// ## Args
///
/// * `self`(`@Tensor<T>`) - The first tensor to be compared
/// * `other`(`@Tensor<T>`) - The second tensor to be compared
/// * `self`(`@Tensor<bool>`) - The first tensor to be compared
/// * `other`(`@Tensor<bool>`) - The second tensor to be compared
///
/// ## Panics
///
Expand All @@ -3475,49 +3475,26 @@ trait TensorTrait<T> {
///
/// ## Examples
///
/// Case 1: Compare tensors with same shape
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn and_example() -> Tensor<bool> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 0, 1, 2, 0, 1, 2].span(),
/// );
///
/// return tensor_1.and(@tensor_2);
/// }
/// >>> [false, true, true, false, true, true, false, true, true]
/// ```
///
/// Case 2: Compare tensors with different shapes
///
/// ```rust
/// use array::{ArrayTrait, SpanTrait};
///
/// use orion::operators::tensor::{TensorTrait, Tensor, U32Tensor};
///
/// fn and_example() -> Tensor<bool> {
/// let tensor_1 = TensorTrait::<u32>::new(
/// shape: array![3, 3].span(), data: array![0, 1, 2, 3, 4, 5, 6, 7, 8].span(),
/// let tensor_1 = TensorTrait::<bool>::new(
/// shape: array![3, 4].span(), data: array![false, true, false, false, false, true, true, false, true, false, false, true].span(),
/// );
///
/// let tensor_2 = TensorTrait::<u32>::new(
/// shape: array![1, 3].span(), data: array![0, 1, 2].span(),
/// let tensor_2 = TensorTrait::<bool>::new(
/// shape: array![3, 4].span(), data: array![false, false, true, true, false, true, false, true, false, true, false, true].span(),
/// );
///
/// return tensor_1.and(@tensor_2);
/// }
/// >>> [false, true, true, false, true, true, false, true, true]
/// >>> [false, false, false, false, false, true, false, false, false, false, false, true]
/// ```
///
fn and(self: @Tensor<T>, other: @Tensor<T>) -> Tensor<bool>;
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool>;
/// #tensor.where
///
/// ```rust
Expand Down
2 changes: 1 addition & 1 deletion src/operators/tensor/implementations/tensor_bool.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,7 @@ impl BoolTensor of TensorTrait<bool> {
}

fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
panic(array!['not supported!'])
math::and::and(self, other)
}

fn identity(self: @Tensor<bool>) -> Tensor<bool> {
Expand Down
2 changes: 1 addition & 1 deletion src/operators/tensor/implementations/tensor_fp16x16.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -372,7 +372,7 @@ impl FP16x16Tensor of TensorTrait<FP16x16> {
core::clip(self, min, max)
}

fn and(self: @Tensor<FP16x16>, other: @Tensor<FP16x16>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -334,7 +334,7 @@ impl FP16x16WTensor of TensorTrait<FP16x16W> {
core::clip(self, min, max)
}

fn and(self: @Tensor<FP16x16W>, other: @Tensor<FP16x16W>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
2 changes: 1 addition & 1 deletion src/operators/tensor/implementations/tensor_fp32x32.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -373,7 +373,7 @@ impl FP32x32Tensor of TensorTrait<FP32x32> {
core::clip(self, min, max)
}

fn and(self: @Tensor<FP32x32>, other: @Tensor<FP32x32>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
2 changes: 1 addition & 1 deletion src/operators/tensor/implementations/tensor_fp64x64.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -373,7 +373,7 @@ impl FP64x64Tensor of TensorTrait<FP64x64> {
core::clip(self, min, max)
}

fn and(self: @Tensor<FP64x64>, other: @Tensor<FP64x64>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
2 changes: 1 addition & 1 deletion src/operators/tensor/implementations/tensor_fp8x23.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -373,7 +373,7 @@ impl FP8x23Tensor of TensorTrait<FP8x23> {
core::clip(self, min, max)
}

fn and(self: @Tensor<FP8x23>, other: @Tensor<FP8x23>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -323,7 +323,7 @@ impl FP8x23WTensor of TensorTrait<FP8x23W> {
core::clip(self, min, max)
}

fn and(self: @Tensor<FP8x23W>, other: @Tensor<FP8x23W>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
2 changes: 1 addition & 1 deletion src/operators/tensor/implementations/tensor_i32.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -371,7 +371,7 @@ impl I32Tensor of TensorTrait<i32> {
core::clip(self, min, max)
}

fn and(self: @Tensor<i32>, other: @Tensor<i32>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
2 changes: 1 addition & 1 deletion src/operators/tensor/implementations/tensor_i8.cairo
Original file line number Diff line number Diff line change
Expand Up @@ -371,7 +371,7 @@ impl I8Tensor of TensorTrait<i8> {
core::clip(self, min, max)
}

fn and(self: @Tensor<i8>, other: @Tensor<i8>) -> Tensor<bool> {
fn and(self: @Tensor<bool>, other: @Tensor<bool>) -> Tensor<bool> {
math::and::and(self, other)
}

Expand Down
Loading

0 comments on commit daab398

Please sign in to comment.