Commit 0714ac6: change to f64

raphaelDkhn committed Oct 21, 2023 · 1 parent 99647a2

Showing 4 changed files with 18 additions and 20 deletions.
2 changes: 1 addition & 1 deletion src/numbers.cairo
@@ -216,7 +216,7 @@ impl FP8x23Number of NumberTrait<FP8x23, u32> {
 }
 
 use orion::numbers::fixed_point::implementations::fp16x16::core::{
-    FP16x16Impl, FP16x16, FP16x16IntoFP32x32
+    FP16x16Impl, FP16x16, FP16x16IntoFP64x64
 };
 use orion::numbers::fixed_point::implementations::fp16x16::math::core as core_fp16x16;
 use orion::numbers::fixed_point::implementations::fp16x16::math::comp as comp_fp16x16;
20 changes: 10 additions & 10 deletions src/numbers/fixed_point/implementations/fp16x16/core.cairo
@@ -5,7 +5,7 @@ use result::{ResultTrait, ResultTraitImpl};
 use traits::{TryInto, Into};
 
 use orion::numbers::signed_integer::{i32::i32, i8::i8};
-use orion::numbers::{FP32x32, FP32x32Impl};
+use orion::numbers::{FP64x64, FP64x64Impl};
 use orion::numbers::fixed_point::core::FixedTrait;
 use orion::numbers::fixed_point::implementations::fp16x16::math::{core, trig, hyp};
 use orion::numbers::fixed_point::utils;
@@ -193,28 +193,28 @@ impl FP16x16Print of PrintTrait<FP16x16> {
     }
 }
 
-impl FP16x16IntoFP32x32 of Into<FP16x16, FP32x32> {
-    fn into(self: FP16x16) -> FP32x32 {
-        return FP32x32 { mag: self.mag.into() * 65536_u64, sign: self.sign };
+impl FP16x16IntoFP64x64 of Into<FP16x16, FP64x64> {
+    fn into(self: FP16x16) -> FP64x64 {
+        return FP64x64 { mag: self.mag.into() * 281474976710656_u128, sign: self.sign };
     }
 }
 
 #[test]
 fn test_fp16x16_into_fp32x32() {
     let a = FP16x16Impl::new_unscaled(42, true);
-    let b: FP32x32 = a.into();
-    assert(b.mag == 180388626432, 'invalid conversion');
+    let b: FP64x64 = a.into();
+    assert(b == FP64x64Impl::new_unscaled(42, true), 'invalid conversion');
 }
 
-impl FP32x32TryIntoFP16x16 of TryInto<FP32x32, FP16x16> {
-    fn try_into(self: FP32x32) -> Option<FP16x16> {
-        Option::Some(FP16x16 { mag: (self.mag / 65536).try_into().unwrap(), sign: self.sign })
+impl FP64x64TryIntoFP16x16 of TryInto<FP64x64, FP16x16> {
+    fn try_into(self: FP64x64) -> Option<FP16x16> {
+        Option::Some(FP16x16 { mag: (self.mag / 281474976710656_u128).try_into().unwrap(), sign: self.sign })
     }
 }
 
 #[test]
 fn test_fp32x32_try_into_fp16x16() {
-    let a = FP32x32Impl::new_unscaled(42, true);
+    let a = FP64x64Impl::new_unscaled(42, true);
     let b: FP16x16 = a.try_into().unwrap();
     assert(b == FP16x16Impl::new_unscaled(42, true), 'invalid conversion');
 }
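Why 281474976710656: FP16x16 carries 16 fractional bits and FP64x64 carries 64, so widening scales the magnitude by 2^(64-16) = 2^48 = 281474976710656, where the old FP32x32 path scaled by 2^(32-16) = 65536. A minimal round-trip sketch of the two conversions above, assuming the re-exports used elsewhere in this diff (orion::numbers for FP64x64/FP64x64Impl, the fp16x16 core module for the rest):

    use core::option::OptionTrait;
    use traits::{TryInto, Into};

    use orion::numbers::{FP64x64, FP64x64Impl};
    use orion::numbers::fixed_point::implementations::fp16x16::core::{
        FP16x16, FP16x16Impl, FP16x16IntoFP64x64, FP64x64TryIntoFP16x16
    };

    #[test]
    fn test_fp16x16_fp64x64_round_trip() {
        // -42.0 in FP16x16: mag = 42 * 2^16, sign = true.
        let a = FP16x16Impl::new_unscaled(42, true);

        // Widening multiplies mag by 2^48, so 42 * 2^16 becomes 42 * 2^64.
        let wide: FP64x64 = a.into();
        assert(wide == FP64x64Impl::new_unscaled(42, true), 'widen failed');

        // Narrowing divides mag by 2^48; exact here, lower bits would truncate.
        let back: FP16x16 = wide.try_into().unwrap();
        assert(back == a, 'narrow failed');
    }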
6 changes: 3 additions & 3 deletions src/operators/nn/implementations/nn_fp16x16.cairo
@@ -3,8 +3,8 @@ use core::option::OptionTrait;
 use orion::operators::tensor::core::Tensor;
 use orion::operators::nn::core::NNTrait;
 use orion::operators::nn::functional;
-use orion::numbers::{FP16x16, FP16x16IntoFP32x32, FP32x32, FP32x32Impl};
-use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd, FP32x32Tensor};
+use orion::numbers::{FP16x16, FP16x16IntoFP64x64, FP64x64, FP64x64Impl};
+use orion::operators::tensor::{FP16x16Tensor, FP16x16TensorDiv, FP16x16TensorAdd, FP64x64Tensor};
 
 impl FP16x16NN of NNTrait<FP16x16> {
     fn relu(tensor: @Tensor<FP16x16>) -> Tensor<FP16x16> {
@@ -16,7 +16,7 @@ impl FP16x16NN of NNTrait<FP16x16> {
     }
 
     fn softmax(tensor: @Tensor<FP16x16>, axis: usize, wide: bool) -> Tensor<FP16x16> {
-        functional::softmax::softmax::<FP16x16, u32, FP32x32, u64>(tensor, axis, wide)
+        functional::softmax::softmax::<FP16x16, u32, FP64x64, u128>(tensor, axis, wide)
     }
 
     fn logsoftmax(tensor: @Tensor<FP16x16>, axis: usize) -> Tensor<FP16x16> {
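The four generic parameters appear to pair each fixed-point type with its raw magnitude integer: FP16x16/u32 is the working precision, and FP64x64/u128 the wide accumulator used when wide is true. A hedged usage sketch; the FP16x16NN import path mirrors this file's location, and the two-argument TensorTrait::new is an assumption (earlier snapshots took a third ExtraParams argument):

    use array::ArrayTrait;
    use orion::numbers::{FP16x16, FP16x16Impl};
    use orion::operators::tensor::core::{Tensor, TensorTrait};
    use orion::operators::tensor::FP16x16Tensor;
    use orion::operators::nn::core::NNTrait;
    use orion::operators::nn::implementations::nn_fp16x16::FP16x16NN;

    fn softmax_wide_example() -> Tensor<FP16x16> {
        // A 2x2 tensor of the unscaled values [[0, 1], [2, 3]].
        let tensor = TensorTrait::new(
            array![2, 2].span(),
            array![
                FP16x16Impl::new_unscaled(0, false),
                FP16x16Impl::new_unscaled(1, false),
                FP16x16Impl::new_unscaled(2, false),
                FP16x16Impl::new_unscaled(3, false),
            ].span(),
        );
        // wide = true: exp and the row sums run in FP64x64 (u128 mag) and are
        // narrowed back to FP16x16 at the end, buying headroom against overflow.
        NNTrait::softmax(@tensor, 1, true)
    }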
10 changes: 4 additions & 6 deletions src/operators/nn/implementations/nn_fp8x23.cairo
@@ -3,11 +3,8 @@ use core::option::OptionTrait;
 use orion::operators::tensor::core::Tensor;
 use orion::operators::nn::core::NNTrait;
 use orion::operators::nn::functional;
-use orion::numbers::{FP8x23, FP8x23IntoFP32x32, FP32x32, FP32x32Impl};
-use orion::operators::tensor::{
-    FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd, FP32x32Tensor
-};
-
+use orion::numbers::{FP8x23, FP32x32, FP32x32Impl};
+use orion::operators::tensor::{FP8x23Tensor, FP8x23TensorDiv, FP8x23TensorAdd, FP32x32Tensor};
 
 
 impl FP8x23NN of NNTrait<FP8x23> {
@@ -20,7 +17,8 @@ impl FP8x23NN of NNTrait<FP8x23> {
     }
 
     fn softmax(tensor: @Tensor<FP8x23>, axis: usize, wide: bool) -> Tensor<FP8x23> {
-        functional::softmax::softmax::<FP8x23, u32, FP32x32, u64>(tensor, axis, wide)
+        // functional::softmax::softmax::<FP8x23, u32, FP32x32, u64>(tensor, axis, wide)
+        panic(array!['not supported!'])
     }
 
     fn logsoftmax(tensor: @Tensor<FP8x23>, axis: usize) -> Tensor<FP8x23> {
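With the wide path retargeted to FP64x64, FP8x23 loses its widening conversion (the FP8x23IntoFP32x32 import is dropped above), so its wide softmax is stubbed out rather than ported: every call now panics, whatever the wide flag. A sketch of the observable behavior as a test; the FP8x23Impl re-export and the FP8x23NN path are assumptions:

    use array::ArrayTrait;
    use orion::numbers::{FP8x23, FP8x23Impl};
    use orion::operators::tensor::core::TensorTrait;
    use orion::operators::tensor::FP8x23Tensor;
    use orion::operators::nn::core::NNTrait;
    use orion::operators::nn::implementations::nn_fp8x23::FP8x23NN;

    #[test]
    #[should_panic(expected: ('not supported!',))]
    fn test_fp8x23_softmax_panics() {
        let tensor = TensorTrait::new(
            array![2].span(),
            array![FP8x23Impl::new_unscaled(1, false), FP8x23Impl::new_unscaled(2, false)].span(),
        );
        // Unconditional: the body is now panic(array!['not supported!']).
        let _result = NNTrait::softmax(@tensor, 0, false);
    }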
