From 4d3ac1cbb383f1719da96cd3b7e7d678c0ce87bc Mon Sep 17 00:00:00 2001 From: Franziskus Kiefer Date: Wed, 11 Oct 2023 14:01:05 +0200 Subject: [PATCH] update HACL to 1b30697fc2b0d8d5e2f541eccfd3fb52b45b905c (#429) --- CMakeLists.txt | 6 +- include/Hacl_RSAPSS.h | 45 +- include/internal/Hacl_Bignum25519_51.h | 4 + include/msvc/Hacl_RSAPSS.h | 45 +- include/msvc/internal/Hacl_Bignum25519_51.h | 4 + info.txt | 6 +- karamel/include/krml/internal/target.h | 8 + karamel/krmllib/dist/minimal/Makefile.basic | 56 --- karamel/krmllib/dist/minimal/Makefile.include | 5 - .../dist/minimal/fstar_uint128_gcc64.h | 4 +- karamel/krmllib/dist/minimal/libkrmllib.def | 11 - ocaml/ctypes.depend | 10 +- src/EverCrypt_AEAD.c | 438 +++++++++++------- src/EverCrypt_AutoConfig2.c | 48 +- src/EverCrypt_DRBG.c | 4 + src/EverCrypt_Hash.c | 5 +- src/EverCrypt_Poly1305.c | 16 +- src/Hacl_Chacha20_Vec128.c | 6 +- src/Hacl_Chacha20_Vec256.c | 6 +- src/Hacl_Chacha20_Vec32.c | 6 +- src/Hacl_Curve25519_64.c | 18 +- src/Hacl_Ed25519.c | 84 ++-- src/Hacl_FFDHE.c | 7 +- src/Hacl_Frodo_KEM.c | 2 +- src/Hacl_HMAC_DRBG.c | 3 + src/Hacl_Hash_Blake2.c | 2 + src/Hacl_Hash_Blake2b_256.c | 1 + src/Hacl_Hash_Blake2s_128.c | 1 + src/Hacl_Hash_MD5.c | 1 - src/Hacl_Hash_SHA1.c | 1 - src/Hacl_Hash_SHA2.c | 4 - src/Hacl_Hash_SHA3.c | 7 +- src/Hacl_K256_ECDSA.c | 28 +- src/Hacl_RSAPSS.c | 45 +- src/Hacl_Salsa20.c | 8 +- src/Hacl_Streaming_Blake2.c | 2 - src/Hacl_Streaming_Blake2b_256.c | 1 - src/Hacl_Streaming_Blake2s_128.c | 1 - src/Hacl_Streaming_Poly1305_128.c | 3 +- src/Hacl_Streaming_Poly1305_256.c | 3 +- src/Hacl_Streaming_Poly1305_32.c | 1 - src/msvc/EverCrypt_AEAD.c | 438 +++++++++++------- src/msvc/EverCrypt_AutoConfig2.c | 48 +- src/msvc/EverCrypt_DRBG.c | 4 + src/msvc/EverCrypt_Hash.c | 5 +- src/msvc/EverCrypt_Poly1305.c | 16 +- src/msvc/Hacl_Chacha20_Vec128.c | 6 +- src/msvc/Hacl_Chacha20_Vec256.c | 6 +- src/msvc/Hacl_Chacha20_Vec32.c | 6 +- src/msvc/Hacl_Curve25519_64.c | 18 +- src/msvc/Hacl_Ed25519.c | 84 ++-- src/msvc/Hacl_FFDHE.c | 7 +- src/msvc/Hacl_Frodo_KEM.c | 2 +- src/msvc/Hacl_HMAC_DRBG.c | 3 + src/msvc/Hacl_Hash_Blake2.c | 2 + src/msvc/Hacl_Hash_Blake2b_256.c | 1 + src/msvc/Hacl_Hash_Blake2s_128.c | 1 + src/msvc/Hacl_Hash_MD5.c | 1 - src/msvc/Hacl_Hash_SHA1.c | 1 - src/msvc/Hacl_Hash_SHA2.c | 4 - src/msvc/Hacl_Hash_SHA3.c | 7 +- src/msvc/Hacl_K256_ECDSA.c | 28 +- src/msvc/Hacl_RSAPSS.c | 45 +- src/msvc/Hacl_Salsa20.c | 8 +- src/msvc/Hacl_Streaming_Blake2.c | 2 - src/msvc/Hacl_Streaming_Blake2b_256.c | 1 - src/msvc/Hacl_Streaming_Blake2s_128.c | 1 - src/msvc/Hacl_Streaming_Poly1305_128.c | 3 +- src/msvc/Hacl_Streaming_Poly1305_256.c | 3 +- src/msvc/Hacl_Streaming_Poly1305_32.c | 1 - src/wasm/EverCrypt_Hash.wasm | Bin 49374 -> 49373 bytes src/wasm/Hacl_Bignum.wasm | Bin 78522 -> 78554 bytes src/wasm/Hacl_Bignum256.wasm | Bin 100214 -> 100226 bytes src/wasm/Hacl_Bignum256_32.wasm | Bin 41067 -> 41067 bytes src/wasm/Hacl_Bignum32.wasm | Bin 15248 -> 15248 bytes src/wasm/Hacl_Bignum4096.wasm | Bin 63798 -> 63810 bytes src/wasm/Hacl_Bignum4096_32.wasm | Bin 32319 -> 32319 bytes src/wasm/Hacl_Bignum64.wasm | Bin 24421 -> 24432 bytes src/wasm/Hacl_Chacha20Poly1305_32.wasm | Bin 7661 -> 7657 bytes src/wasm/Hacl_Chacha20_Vec32.wasm | Bin 5552 -> 5544 bytes src/wasm/Hacl_Curve25519_51.wasm | Bin 7170 -> 7166 bytes src/wasm/Hacl_GenericField32.wasm | Bin 10727 -> 10731 bytes src/wasm/Hacl_GenericField64.wasm | Bin 11708 -> 11718 bytes src/wasm/Hacl_HMAC.wasm | Bin 29842 -> 29855 bytes src/wasm/Hacl_HPKE_Curve51_CP32_SHA256.wasm | 
Bin 21304 -> 21306 bytes src/wasm/Hacl_HPKE_Curve51_CP32_SHA512.wasm | Bin 21432 -> 21434 bytes src/wasm/Hacl_Hash_MD5.wasm | Bin 15550 -> 15558 bytes src/wasm/Hacl_Hash_SHA1.wasm | Bin 13140 -> 13148 bytes src/wasm/Hacl_Hash_SHA3.wasm | Bin 17639 -> 17611 bytes src/wasm/Hacl_K256_ECDSA.wasm | Bin 98203 -> 98193 bytes src/wasm/Hacl_NaCl.wasm | Bin 5031 -> 5027 bytes src/wasm/Hacl_P256.wasm | Bin 83233 -> 83213 bytes src/wasm/Hacl_Salsa20.wasm | Bin 10032 -> 10024 bytes src/wasm/INFO.txt | 4 +- 94 files changed, 927 insertions(+), 785 deletions(-) delete mode 100644 karamel/krmllib/dist/minimal/Makefile.basic delete mode 100644 karamel/krmllib/dist/minimal/Makefile.include delete mode 100644 karamel/krmllib/dist/minimal/libkrmllib.def diff --git a/CMakeLists.txt b/CMakeLists.txt index 825b1192..dcdd2f68 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -93,12 +93,9 @@ if(NOT MSVC) # -pedantic # -Wconversion # -Wsign-conversion - # -Werror=gcc-compat $<$:-g> $<$:-Og> $<$:-O3> - # $<$:-g> - # $<$:-Wno-deprecated-declarations> ) endif() @@ -337,6 +334,9 @@ configure_file(config/Config.h.in config.h) # Now combine everything into the hacl library # # Dynamic library add_library(hacl SHARED ${SOURCES_std} ${VALE_OBJECTS}) +if(NOT MSVC) + target_compile_options(hacl PRIVATE -Wsign-conversion -Wconversion -Wall -Wextra -pedantic) +endif() if(TOOLCHAIN_CAN_COMPILE_VEC128 AND HACL_VEC128_O) add_dependencies(hacl hacl_vec128) diff --git a/include/Hacl_RSAPSS.h b/include/Hacl_RSAPSS.h index 8f4de949..90bd69ce 100644 --- a/include/Hacl_RSAPSS.h +++ b/include/Hacl_RSAPSS.h @@ -43,9 +43,9 @@ extern "C" { Sign a message `msg` and write the signature to `sgnt`. @param a Hash algorithm to use. Allowed values for `a` are ... - * Spec_Hash_Definitions_SHA2_256, - * Spec_Hash_Definitions_SHA2_384, and - * Spec_Hash_Definitions_SHA2_512. + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. @@ -75,7 +75,10 @@ Hacl_RSAPSS_rsapss_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param pkey Pointer to public key created by `Hacl_RSAPSS_new_rsapss_load_pkey`. @@ -105,10 +108,10 @@ Load a public key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. -@return Returns an allocated public key. Note: caller must take care to `free()` the created key. +@return Returns an allocated public key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. 
*/ uint64_t *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb); @@ -119,11 +122,11 @@ Load a secret key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. -@return Returns an allocated secret key. Note: caller must take care to `free()` the created key. +@return Returns an allocated secret key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. */ uint64_t *Hacl_RSAPSS_new_rsapss_load_skey( @@ -138,13 +141,16 @@ uint64_t /** Sign a message `msg` and write the signature to `sgnt`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param salt Pointer to `saltLen` bytes where the salt is read from. @param msgLen Length of message. @@ -172,11 +178,14 @@ Hacl_RSAPSS_rsapss_skey_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param sgntLen Length of signature. @param sgnt Pointer to `sgntLen` bytes where the signature is read from. 
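The Hacl_RSAPSS.h doc-comment updates above spell out the allowed hash algorithms, the big-endian encoding of the key parts, and the NULL-on-failure / caller-must-`free()` contract for the loaded keys. A minimal, hypothetical caller built on those comments could look as follows; the buffer contents, the modBits/eBits/saltLen values, and the `Hacl_RSAPSS_rsapss_verify` parameter order are assumptions taken from the doc comment, not part of this patch:

/* Hypothetical sketch of the documented load-pkey + verify path.
 * Assumes Hacl_RSAPSS.h pulls in the Spec_Hash_Definitions_* constants. */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include "Hacl_RSAPSS.h"

bool verify_example(uint8_t *nb, uint8_t *eb,        /* big-endian n and e */
                    uint8_t *sgnt, uint32_t sgntLen,
                    uint8_t *msg, uint32_t msgLen)
{
  uint32_t modBits = 2048U;                          /* placeholder key size */
  uint32_t eBits = 17U;                              /* e.g. e = 65537 */
  /* Per the docs: returns NULL if the key parts are invalid or allocation fails. */
  uint64_t *pkey = Hacl_RSAPSS_new_rsapss_load_pkey(modBits, eBits, nb, eb);
  if (pkey == NULL)
    return false;
  bool ok = Hacl_RSAPSS_rsapss_verify(Spec_Hash_Definitions_SHA2_256,
                                      modBits, eBits, pkey,
                                      32U /* saltLen */,
                                      sgntLen, sgnt,
                                      msgLen, msg);
  free(pkey);                                        /* caller owns the key */
  return ok;
}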
diff --git a/include/internal/Hacl_Bignum25519_51.h b/include/internal/Hacl_Bignum25519_51.h index 9fe5e9fc..25a10503 100644 --- a/include/internal/Hacl_Bignum25519_51.h +++ b/include/internal/Hacl_Bignum25519_51.h @@ -84,6 +84,7 @@ Hacl_Impl_Curve25519_Field51_fmul( FStar_UInt128_uint128 *uu___ ) { + KRML_HOST_IGNORE(uu___); uint64_t f10 = f1[0U]; uint64_t f11 = f1[1U]; uint64_t f12 = f1[2U]; @@ -167,6 +168,7 @@ Hacl_Impl_Curve25519_Field51_fmul2( FStar_UInt128_uint128 *uu___ ) { + KRML_HOST_IGNORE(uu___); uint64_t f10 = f1[0U]; uint64_t f11 = f1[1U]; uint64_t f12 = f1[2U]; @@ -371,6 +373,7 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f static inline void Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___) { + KRML_HOST_IGNORE(uu___); uint64_t f0 = f[0U]; uint64_t f1 = f[1U]; uint64_t f2 = f[2U]; @@ -446,6 +449,7 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint static inline void Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___) { + KRML_HOST_IGNORE(uu___); uint64_t f10 = f[0U]; uint64_t f11 = f[1U]; uint64_t f12 = f[2U]; diff --git a/include/msvc/Hacl_RSAPSS.h b/include/msvc/Hacl_RSAPSS.h index 8f4de949..90bd69ce 100644 --- a/include/msvc/Hacl_RSAPSS.h +++ b/include/msvc/Hacl_RSAPSS.h @@ -43,9 +43,9 @@ extern "C" { Sign a message `msg` and write the signature to `sgnt`. @param a Hash algorithm to use. Allowed values for `a` are ... - * Spec_Hash_Definitions_SHA2_256, - * Spec_Hash_Definitions_SHA2_384, and - * Spec_Hash_Definitions_SHA2_512. + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. @@ -75,7 +75,10 @@ Hacl_RSAPSS_rsapss_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param pkey Pointer to public key created by `Hacl_RSAPSS_new_rsapss_load_pkey`. @@ -105,10 +108,10 @@ Load a public key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. -@return Returns an allocated public key. Note: caller must take care to `free()` the created key. +@return Returns an allocated public key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. */ uint64_t *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb); @@ -119,11 +122,11 @@ Load a secret key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. 
-@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. -@return Returns an allocated secret key. Note: caller must take care to `free()` the created key. +@return Returns an allocated secret key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. */ uint64_t *Hacl_RSAPSS_new_rsapss_load_skey( @@ -138,13 +141,16 @@ uint64_t /** Sign a message `msg` and write the signature to `sgnt`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param salt Pointer to `saltLen` bytes where the salt is read from. @param msgLen Length of message. @@ -172,11 +178,14 @@ Hacl_RSAPSS_rsapss_skey_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param sgntLen Length of signature. @param sgnt Pointer to `sgntLen` bytes where the signature is read from. 
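The msvc copy of the header carries the same doc-comment updates. For the signing direction, a complementary sketch (again hypothetical: the parameter order is assumed to follow the doc comment above, and the salt/message/key buffers are placeholders) might be:

/* Hypothetical one-shot signing sketch using the raw key parts nb/eb/db
 * (big-endian, `ceil(modBits / 8)` bytes each per the docs); sgnt is assumed
 * to provide enough room for the signature. */
#include <stdbool.h>
#include <stdint.h>
#include "Hacl_RSAPSS.h"

bool sign_example(uint8_t *nb, uint8_t *eb, uint8_t *db,
                  uint8_t *salt, uint32_t saltLen,
                  uint8_t *msg, uint32_t msgLen,
                  uint8_t *sgnt)
{
  return Hacl_RSAPSS_rsapss_skey_sign(Spec_Hash_Definitions_SHA2_256,
                                      2048U /* modBits */, 17U /* eBits */,
                                      2048U /* dBits */,
                                      nb, eb, db,
                                      saltLen, salt,
                                      msgLen, msg,
                                      sgnt);
}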
diff --git a/include/msvc/internal/Hacl_Bignum25519_51.h b/include/msvc/internal/Hacl_Bignum25519_51.h index 9fe5e9fc..25a10503 100644 --- a/include/msvc/internal/Hacl_Bignum25519_51.h +++ b/include/msvc/internal/Hacl_Bignum25519_51.h @@ -84,6 +84,7 @@ Hacl_Impl_Curve25519_Field51_fmul( FStar_UInt128_uint128 *uu___ ) { + KRML_HOST_IGNORE(uu___); uint64_t f10 = f1[0U]; uint64_t f11 = f1[1U]; uint64_t f12 = f1[2U]; @@ -167,6 +168,7 @@ Hacl_Impl_Curve25519_Field51_fmul2( FStar_UInt128_uint128 *uu___ ) { + KRML_HOST_IGNORE(uu___); uint64_t f10 = f1[0U]; uint64_t f11 = f1[1U]; uint64_t f12 = f1[2U]; @@ -371,6 +373,7 @@ static inline void Hacl_Impl_Curve25519_Field51_fmul1(uint64_t *out, uint64_t *f static inline void Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___) { + KRML_HOST_IGNORE(uu___); uint64_t f0 = f[0U]; uint64_t f1 = f[1U]; uint64_t f2 = f[2U]; @@ -446,6 +449,7 @@ Hacl_Impl_Curve25519_Field51_fsqr(uint64_t *out, uint64_t *f, FStar_UInt128_uint static inline void Hacl_Impl_Curve25519_Field51_fsqr2(uint64_t *out, uint64_t *f, FStar_UInt128_uint128 *uu___) { + KRML_HOST_IGNORE(uu___); uint64_t f10 = f[0U]; uint64_t f11 = f[1U]; uint64_t f12 = f[2U]; diff --git a/info.txt b/info.txt index 1a29e888..af3dbf98 100644 --- a/info.txt +++ b/info.txt @@ -1,5 +1,5 @@ The code was generated with the following toolchain. -F* version: 155853a14336aa0713dba7db5408f4c8ab512a06 -KaRaMeL version: db63c1de17565be0ec4989f58532717a04e3ff40 -HACL* version: ad60c9d98c9ce8f6a4fa13090511fa4b3a2c137b +F* version: bc622701c668f6b4092760879372968265d4a4e1 +KaRaMeL version: 7cffd27cfefbd220e986e561e8d350f043609f76 +HACL* version: 1b30697fc2b0d8d5e2f541eccfd3fb52b45b905c Vale version: 0.3.19 diff --git a/karamel/include/krml/internal/target.h b/karamel/include/krml/internal/target.h index 634c20fc..4903d224 100644 --- a/karamel/include/krml/internal/target.h +++ b/karamel/include/krml/internal/target.h @@ -57,6 +57,14 @@ # define KRML_HOST_IGNORE(x) (void)(x) #endif +#ifndef KRML_MAYBE_UNUSED +# if defined(__GNUC__) +# define KRML_MAYBE_UNUSED __attribute__((unused)) +# else +# define KRML_MAYBE_UNUSED +# endif +#endif + #ifndef KRML_NOINLINE # if defined(_MSC_VER) # define KRML_NOINLINE __declspec(noinline) diff --git a/karamel/krmllib/dist/minimal/Makefile.basic b/karamel/krmllib/dist/minimal/Makefile.basic deleted file mode 100644 index d7a1fdfd..00000000 --- a/karamel/krmllib/dist/minimal/Makefile.basic +++ /dev/null @@ -1,56 +0,0 @@ -# A basic Makefile that KaRaMeL copies in the output directory; this is not -# guaranteed to work and will only work well for very simple projects. This -# Makefile uses: -# - the custom C files passed to your krml invocation -# - the custom C flags passed to your krml invocation -# - the -o option passed to your krml invocation - -include Makefile.include - -ifeq (,$(KRML_HOME)) - $(error please define KRML_HOME to point to the root of your KaRaMeL git checkout) -endif - -CFLAGS += -I. 
-I $(KRML_HOME)/include -I $(KRML_HOME)/krmllib/dist/minimal -CFLAGS += -Wall -Wextra -Werror -std=c11 -Wno-unused-variable \ - -Wno-unknown-warning-option -Wno-unused-but-set-variable -Wno-unused-function \ - -Wno-unused-parameter -Wno-infinite-recursion \ - -g -fwrapv -D_BSD_SOURCE -D_DEFAULT_SOURCE -ifeq ($(OS),Windows_NT) -CFLAGS += -D__USE_MINGW_ANSI_STDIO -else -CFLAGS += -fPIC -endif -CFLAGS += $(USER_CFLAGS) - -SOURCES += $(ALL_C_FILES) $(USER_C_FILES) -ifneq (,$(BLACKLIST)) - SOURCES := $(filter-out $(BLACKLIST),$(SOURCES)) -endif -OBJS += $(patsubst %.c,%.o,$(SOURCES)) - -all: $(USER_TARGET) - -$(USER_TARGET): $(OBJS) - -AR ?= ar - -%.a: - $(AR) cr $@ $^ - -%.exe: - $(CC) $(CFLAGS) -o $@ $^ $(KRML_HOME)/krmllib/dist/generic/libkrmllib.a - -%.so: - $(CC) $(CFLAGS) -shared -o $@ $^ - -%.d: %.c - @set -e; rm -f $@; \ - $(CC) -MM -MG $(CFLAGS) $< > $@.$$$$; \ - sed 's,\($(notdir $*)\)\.o[ :]*,$(dir $@)\1.o $@ : ,g' < $@.$$$$ > $@; \ - rm -f $@.$$$$ - -include $(patsubst %.c,%.d,$(SOURCES)) - -clean: - rm -rf *.o *.d $(USER_TARGET) diff --git a/karamel/krmllib/dist/minimal/Makefile.include b/karamel/krmllib/dist/minimal/Makefile.include deleted file mode 100644 index ad532171..00000000 --- a/karamel/krmllib/dist/minimal/Makefile.include +++ /dev/null @@ -1,5 +0,0 @@ -USER_TARGET=libkrmllib.a -USER_CFLAGS= -USER_C_FILES=fstar_uint128.c -ALL_C_FILES= -ALL_H_FILES=FStar_UInt128.h FStar_UInt_8_16_32_64.h LowStar_Endianness.h diff --git a/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h b/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h index e40304b2..ae109004 100644 --- a/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h +++ b/karamel/krmllib/dist/minimal/fstar_uint128_gcc64.h @@ -110,7 +110,7 @@ inline static uint128_t FStar_UInt128_mul_wide(uint64_t x, uint64_t y) { inline static uint128_t FStar_UInt128_eq_mask(uint128_t x, uint128_t y) { uint64_t mask = FStar_UInt64_eq_mask((uint64_t)(x >> 64), (uint64_t)(y >> 64)) & - FStar_UInt64_eq_mask(x, y); + FStar_UInt64_eq_mask((uint64_t)x, (uint64_t)y); return ((uint128_t)mask) << 64 | mask; } @@ -118,7 +118,7 @@ inline static uint128_t FStar_UInt128_gte_mask(uint128_t x, uint128_t y) { uint64_t mask = (FStar_UInt64_gte_mask(x >> 64, y >> 64) & ~(FStar_UInt64_eq_mask(x >> 64, y >> 64))) | - (FStar_UInt64_eq_mask(x >> 64, y >> 64) & FStar_UInt64_gte_mask(x, y)); + (FStar_UInt64_eq_mask(x >> 64, y >> 64) & FStar_UInt64_gte_mask((uint64_t)x, (uint64_t)y)); return ((uint128_t)mask) << 64 | mask; } diff --git a/karamel/krmllib/dist/minimal/libkrmllib.def b/karamel/krmllib/dist/minimal/libkrmllib.def deleted file mode 100644 index c4ab8e38..00000000 --- a/karamel/krmllib/dist/minimal/libkrmllib.def +++ /dev/null @@ -1,11 +0,0 @@ -LIBRARY libkrmllib - -EXPORTS - FStar_UInt64_eq_mask - FStar_UInt64_gte_mask - FStar_UInt32_eq_mask - FStar_UInt32_gte_mask - FStar_UInt16_eq_mask - FStar_UInt16_gte_mask - FStar_UInt8_eq_mask - FStar_UInt8_gte_mask diff --git a/ocaml/ctypes.depend b/ocaml/ctypes.depend index 86af86df..31393b5e 100644 --- a/ocaml/ctypes.depend +++ b/ocaml/ctypes.depend @@ -1,4 +1,4 @@ -CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2_stubs.cmx lib/Hacl_Hash_Blake2_bindings.cmx lib/Hacl_Hash_Blake2b_256_stubs.cmx lib/Hacl_Hash_Blake2b_256_bindings.cmx lib/Hacl_Hash_Blake2s_128_stubs.cmx lib/Hacl_Hash_Blake2s_128_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx 
lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_Poly1305_32_stubs.cmx lib/Hacl_Poly1305_32_bindings.cmx lib/Hacl_Poly1305_128_stubs.cmx lib/Hacl_Poly1305_128_bindings.cmx lib/Hacl_Poly1305_256_stubs.cmx lib/Hacl_Poly1305_256_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_Chacha20Poly1305_128_stubs.cmx lib/Hacl_Chacha20Poly1305_128_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_Chacha20Poly1305_256_stubs.cmx lib/Hacl_Chacha20Poly1305_256_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx 
lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_Chacha20Poly1305_32_stubs.cmx lib/Hacl_Chacha20Poly1305_32_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/Hacl_Streaming_Poly1305_32_stubs.cmx lib/Hacl_Streaming_Poly1305_32_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_Streaming_Blake2_stubs.cmx lib/Hacl_Streaming_Blake2_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx +CTYPES_DEPS=lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Spec_stubs.cmx lib/Hacl_Spec_bindings.cmx lib/Hacl_Hash_Blake2_stubs.cmx lib/Hacl_Hash_Blake2_bindings.cmx lib/Hacl_Hash_Blake2b_256_stubs.cmx lib/Hacl_Hash_Blake2b_256_bindings.cmx lib/Hacl_Hash_Blake2s_128_stubs.cmx lib/Hacl_Hash_Blake2s_128_bindings.cmx lib/Hacl_Hash_SHA3_stubs.cmx lib/Hacl_Hash_SHA3_bindings.cmx lib/Hacl_Hash_Base_stubs.cmx lib/Hacl_Hash_Base_bindings.cmx lib/Hacl_Hash_MD5_stubs.cmx lib/Hacl_Hash_MD5_bindings.cmx lib/Hacl_Hash_SHA1_stubs.cmx lib/Hacl_Hash_SHA1_bindings.cmx lib/Hacl_SHA2_Types_stubs.cmx lib/Hacl_SHA2_Types_bindings.cmx lib/Hacl_Hash_SHA2_stubs.cmx lib/Hacl_Hash_SHA2_bindings.cmx lib/EverCrypt_Error_stubs.cmx lib/EverCrypt_Error_bindings.cmx lib/EverCrypt_AutoConfig2_stubs.cmx lib/EverCrypt_AutoConfig2_bindings.cmx lib/EverCrypt_Hash_stubs.cmx lib/EverCrypt_Hash_bindings.cmx lib/Hacl_Chacha20_stubs.cmx lib/Hacl_Chacha20_bindings.cmx lib/Hacl_Salsa20_stubs.cmx lib/Hacl_Salsa20_bindings.cmx lib/Hacl_Bignum_Base_stubs.cmx lib/Hacl_Bignum_Base_bindings.cmx lib/Hacl_Bignum_stubs.cmx 
lib/Hacl_Bignum_bindings.cmx lib/Hacl_Curve25519_64_stubs.cmx lib/Hacl_Curve25519_64_bindings.cmx lib/Hacl_Bignum25519_51_stubs.cmx lib/Hacl_Bignum25519_51_bindings.cmx lib/Hacl_Curve25519_51_stubs.cmx lib/Hacl_Curve25519_51_bindings.cmx lib/Hacl_Ed25519_stubs.cmx lib/Hacl_Ed25519_bindings.cmx lib/Hacl_Poly1305_32_stubs.cmx lib/Hacl_Poly1305_32_bindings.cmx lib/Hacl_Poly1305_128_stubs.cmx lib/Hacl_Poly1305_128_bindings.cmx lib/Hacl_Poly1305_256_stubs.cmx lib/Hacl_Poly1305_256_bindings.cmx lib/Hacl_NaCl_stubs.cmx lib/Hacl_NaCl_bindings.cmx lib/Hacl_P256_stubs.cmx lib/Hacl_P256_bindings.cmx lib/Hacl_Bignum_K256_stubs.cmx lib/Hacl_Bignum_K256_bindings.cmx lib/Hacl_K256_ECDSA_stubs.cmx lib/Hacl_K256_ECDSA_bindings.cmx lib/Hacl_Frodo_KEM_stubs.cmx lib/Hacl_Frodo_KEM_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_IntTypes_Intrinsics_stubs.cmx lib/Hacl_IntTypes_Intrinsics_bindings.cmx lib/Hacl_IntTypes_Intrinsics_128_stubs.cmx lib/Hacl_IntTypes_Intrinsics_128_bindings.cmx lib/Hacl_RSAPSS_stubs.cmx lib/Hacl_RSAPSS_bindings.cmx lib/Hacl_FFDHE_stubs.cmx lib/Hacl_FFDHE_bindings.cmx lib/Hacl_Frodo640_stubs.cmx lib/Hacl_Frodo640_bindings.cmx lib/Hacl_Chacha20_Vec128_stubs.cmx lib/Hacl_Chacha20_Vec128_bindings.cmx lib/Hacl_Chacha20Poly1305_128_stubs.cmx lib/Hacl_Chacha20Poly1305_128_bindings.cmx lib/Hacl_HMAC_stubs.cmx lib/Hacl_HMAC_bindings.cmx lib/Hacl_HKDF_stubs.cmx lib/Hacl_HKDF_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA512_bindings.cmx lib/EverCrypt_Cipher_stubs.cmx lib/EverCrypt_Cipher_bindings.cmx lib/Hacl_GenericField32_stubs.cmx lib/Hacl_GenericField32_bindings.cmx lib/Hacl_SHA2_Vec256_stubs.cmx lib/Hacl_SHA2_Vec256_bindings.cmx lib/Hacl_EC_K256_stubs.cmx lib/Hacl_EC_K256_bindings.cmx lib/Hacl_Bignum4096_stubs.cmx lib/Hacl_Bignum4096_bindings.cmx lib/Hacl_Chacha20_Vec32_stubs.cmx lib/Hacl_Chacha20_Vec32_bindings.cmx lib/EverCrypt_Ed25519_stubs.cmx lib/EverCrypt_Ed25519_bindings.cmx lib/Hacl_Bignum4096_32_stubs.cmx lib/Hacl_Bignum4096_32_bindings.cmx lib/EverCrypt_HMAC_stubs.cmx lib/EverCrypt_HMAC_bindings.cmx lib/Hacl_HMAC_DRBG_stubs.cmx lib/Hacl_HMAC_DRBG_bindings.cmx lib/EverCrypt_DRBG_stubs.cmx lib/EverCrypt_DRBG_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA512_bindings.cmx lib/Hacl_HPKE_P256_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP128_SHA256_bindings.cmx lib/EverCrypt_Curve25519_stubs.cmx lib/EverCrypt_Curve25519_bindings.cmx lib/Hacl_Chacha20_Vec256_stubs.cmx lib/Hacl_Chacha20_Vec256_bindings.cmx lib/Hacl_Chacha20Poly1305_256_stubs.cmx lib/Hacl_Chacha20Poly1305_256_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA512_bindings.cmx lib/Hacl_Frodo976_stubs.cmx lib/Hacl_Frodo976_bindings.cmx lib/Hacl_HMAC_Blake2s_128_stubs.cmx lib/Hacl_HMAC_Blake2s_128_bindings.cmx lib/Hacl_HKDF_Blake2s_128_stubs.cmx lib/Hacl_HKDF_Blake2s_128_bindings.cmx lib/Hacl_GenericField64_stubs.cmx lib/Hacl_GenericField64_bindings.cmx lib/Hacl_Frodo1344_stubs.cmx lib/Hacl_Frodo1344_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA512_bindings.cmx lib/Hacl_Bignum32_stubs.cmx lib/Hacl_Bignum32_bindings.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP128_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP128_SHA256_bindings.cmx lib/Hacl_Bignum256_32_stubs.cmx 
lib/Hacl_Bignum256_32_bindings.cmx lib/Hacl_SHA2_Vec128_stubs.cmx lib/Hacl_SHA2_Vec128_bindings.cmx lib/Hacl_Chacha20Poly1305_32_stubs.cmx lib/Hacl_Chacha20Poly1305_32_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib/EverCrypt_Poly1305_stubs.cmx lib/EverCrypt_Poly1305_bindings.cmx lib/Hacl_Streaming_Poly1305_32_stubs.cmx lib/Hacl_Streaming_Poly1305_32_bindings.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve51_CP32_SHA512_bindings.cmx lib/Hacl_Streaming_Blake2_stubs.cmx lib/Hacl_Streaming_Blake2_bindings.cmx lib/Hacl_HPKE_P256_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP256_SHA256_bindings.cmx lib/Hacl_HPKE_P256_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_P256_CP32_SHA256_bindings.cmx lib/Hacl_Bignum64_stubs.cmx lib/Hacl_Bignum64_bindings.cmx lib/Hacl_Frodo64_stubs.cmx lib/Hacl_Frodo64_bindings.cmx lib/Hacl_HMAC_Blake2b_256_stubs.cmx lib/Hacl_HMAC_Blake2b_256_bindings.cmx lib/Hacl_HKDF_Blake2b_256_stubs.cmx lib/Hacl_HKDF_Blake2b_256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA256_bindings.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_stubs.cmx lib/Hacl_HPKE_Curve64_CP32_SHA512_bindings.cmx lib/EverCrypt_HKDF_stubs.cmx lib/EverCrypt_HKDF_bindings.cmx lib/Hacl_EC_Ed25519_stubs.cmx lib/Hacl_EC_Ed25519_bindings.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_stubs.cmx lib/Hacl_HPKE_Curve51_CP256_SHA256_bindings.cmx lib/EverCrypt_Chacha20Poly1305_stubs.cmx lib/EverCrypt_Chacha20Poly1305_bindings.cmx lib/EverCrypt_AEAD_stubs.cmx lib/EverCrypt_AEAD_bindings.cmx lib/Hacl_Bignum256_stubs.cmx lib/Hacl_Bignum256_bindings.cmx lib/Hacl_Streaming_Types_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmo: lib_gen/Hacl_Streaming_Types_gen.cmx: lib/Hacl_Streaming_Types_bindings.cmx @@ -283,14 +283,14 @@ lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Imp lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx: lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o lib/Hacl_HPKE_Curve51_CP32_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve51_CP32_SHA256_gen.cmx -lib/EverCrypt_Poly1305_bindings.cmx: -lib/EverCrypt_Poly1305_bindings.cmo: -lib_gen/EverCrypt_Poly1305_gen.cmx: lib/EverCrypt_Poly1305_bindings.cmx -lib_gen/EverCrypt_Poly1305_gen.exe: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmo: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmo lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmo lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx: lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.exe: lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_bindings.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_stubs.cmx lib/Hacl_HPKE_Interface_Hacl_Impl_HPKE_Hacl_Meta_HPKE_c_stubs.o 
lib/Hacl_HPKE_Curve64_CP256_SHA256_bindings.cmx lib_gen/Hacl_HPKE_Curve64_CP256_SHA256_gen.cmx +lib/EverCrypt_Poly1305_bindings.cmx: +lib/EverCrypt_Poly1305_bindings.cmo: +lib_gen/EverCrypt_Poly1305_gen.cmx: lib/EverCrypt_Poly1305_bindings.cmx +lib_gen/EverCrypt_Poly1305_gen.exe: lib/EverCrypt_Poly1305_bindings.cmx lib_gen/EverCrypt_Poly1305_gen.cmx lib/Hacl_Streaming_Poly1305_32_bindings.cmx: lib/Hacl_Streaming_Types_bindings.cmx lib/Hacl_Streaming_Types_stubs.cmx lib/Hacl_Streaming_Poly1305_32_bindings.cmo: lib/Hacl_Streaming_Types_bindings.cmo lib/Hacl_Streaming_Types_stubs.cmo lib_gen/Hacl_Streaming_Poly1305_32_gen.cmx: lib/Hacl_Streaming_Poly1305_32_bindings.cmx diff --git a/src/EverCrypt_AEAD.c b/src/EverCrypt_AEAD.c index 564dbc2e..d3a4ffbe 100644 --- a/src/EverCrypt_AEAD.c +++ b/src/EverCrypt_AEAD.c @@ -46,6 +46,8 @@ The state may be reused as many times as desired. */ bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee) { + KRML_HOST_IGNORE(a); + KRML_HOST_IGNORE(projectee); return true; } @@ -58,8 +60,7 @@ Return the algorithm used in the AEAD state. */ Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s) { - EverCrypt_AEAD_state_s scrut = *s; - Spec_Cipher_Expansion_impl impl = scrut.impl; + Spec_Cipher_Expansion_impl impl = (*s).impl; switch (impl) { case Spec_Cipher_Expansion_Hacl_CHACHA20: @@ -97,6 +98,8 @@ create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k) static EverCrypt_Error_error_code create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) { + KRML_HOST_IGNORE(dst); + KRML_HOST_IGNORE(k); #if HACL_CAN_COMPILE_VALE bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); @@ -108,8 +111,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)480U, sizeof (uint8_t)); uint8_t *keys_b = ek; uint8_t *hkeys_b = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b); - uint64_t scrut0 = aes128_keyhash_init(keys_b, hkeys_b); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b, hkeys_b)); EverCrypt_AEAD_state_s *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s)); p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }); @@ -125,6 +128,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) static EverCrypt_Error_error_code create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) { + KRML_HOST_IGNORE(dst); + KRML_HOST_IGNORE(k); #if HACL_CAN_COMPILE_VALE bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); @@ -136,8 +141,8 @@ create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)544U, sizeof (uint8_t)); uint8_t *keys_b = ek; uint8_t *hkeys_b = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b); - uint64_t scrut0 = aes256_keyhash_init(keys_b, hkeys_b); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b, hkeys_b)); EverCrypt_AEAD_state_s *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s)); p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }); @@ -203,6 +208,15 @@ encrypt_aes128_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(s); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + 
KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -212,8 +226,7 @@ encrypt_aes128_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)304U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -223,8 +236,12 @@ encrypt_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -250,9 +267,7 @@ encrypt_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -268,7 +283,7 @@ encrypt_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -281,9 +296,7 @@ encrypt_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -299,7 +312,7 @@ encrypt_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, @@ -327,6 +340,15 @@ encrypt_aes256_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(s); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -336,8 +358,7 @@ encrypt_aes256_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)368U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -347,8 +368,12 @@ encrypt_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -374,9 +399,7 @@ encrypt_aes256_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -392,7 +415,7 @@ encrypt_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -405,9 +428,7 @@ encrypt_aes256_gcm( uint64_t auth_num = 
(uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -423,7 +444,7 @@ encrypt_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, @@ -525,27 +546,34 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut0 = aes128_key_expansion(k, keys_b0); - uint64_t scrut1 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -555,8 +583,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -582,9 +614,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -600,7 +630,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -613,9 +643,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -631,12 +659,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; #else @@ -669,27 +697,34 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( 
uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut0 = aes256_key_expansion(k, keys_b0); - uint64_t scrut1 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -699,8 +734,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -726,9 +765,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -744,7 +781,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -757,9 +794,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -775,12 +810,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; #else @@ -805,6 +840,15 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -816,23 +860,21 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek 
+ (uint32_t)176U; - uint64_t scrut0 = aes128_key_expansion(k, keys_b0); - uint64_t scrut1 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -842,8 +884,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -869,9 +915,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -887,7 +931,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -900,9 +944,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -918,12 +960,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; } @@ -946,6 +988,15 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -957,23 +1008,21 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut0 = aes256_key_expansion(k, keys_b0); - uint64_t scrut1 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == 
NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -983,8 +1032,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1010,9 +1063,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -1028,7 +1079,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -1041,9 +1092,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -1059,12 +1108,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; } @@ -1087,12 +1136,12 @@ EverCrypt_AEAD_encrypt_expand_chacha20_poly1305( uint8_t *tag ) { + KRML_HOST_IGNORE(iv_len); uint8_t ek[32U] = { 0U }; EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek }; memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t)); EverCrypt_AEAD_state_s *s = &p; - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; EverCrypt_Chacha20Poly1305_aead_encrypt(ek0, iv, ad_len, ad, plain_len, plain, cipher, tag); return EverCrypt_Error_Success; } @@ -1173,6 +1222,15 @@ decrypt_aes128_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(s); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -1182,8 +1240,7 @@ decrypt_aes128_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)304U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -1193,8 +1250,12 @@ decrypt_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, 
tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1222,7 +1283,7 @@ decrypt_aes128_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut0 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1240,7 +1301,6 @@ decrypt_aes128_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } else @@ -1255,7 +1315,7 @@ decrypt_aes128_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut0 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1273,7 +1333,6 @@ decrypt_aes128_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1307,6 +1366,15 @@ decrypt_aes256_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(s); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -1316,8 +1384,7 @@ decrypt_aes256_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)368U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -1327,8 +1394,12 @@ decrypt_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1356,7 +1427,7 @@ decrypt_aes256_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut0 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1374,7 +1445,6 @@ decrypt_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } else @@ -1389,7 +1459,7 @@ decrypt_aes256_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut0 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1407,7 +1477,6 @@ decrypt_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1449,8 +1518,7 @@ decrypt_chacha20_poly1305( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint32_t r = EverCrypt_Chacha20Poly1305_aead_decrypt(ek, iv, ad_len, ad, cipher_len, dst, cipher, tag); if (r == (uint32_t)0U) @@ -1508,8 +1576,7 @@ EverCrypt_AEAD_decrypt( { return EverCrypt_Error_InvalidKey; } - EverCrypt_AEAD_state_s scrut = *s; - Spec_Cipher_Expansion_impl i = scrut.impl; + Spec_Cipher_Expansion_impl i = (*s).impl; switch (i) { case Spec_Cipher_Expansion_Vale_AES128: @@ -1553,12 +1620,21 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint8_t *dst ) { + 
KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b0); - uint64_t scrut0 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -1569,8 +1645,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -1580,8 +1655,12 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1609,7 +1688,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1627,7 +1706,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } else @@ -1642,7 +1720,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1660,7 +1738,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1702,12 +1779,21 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint8_t *dst ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b0); - uint64_t scrut0 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -1718,8 +1804,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -1729,8 +1814,12 
@@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1758,7 +1847,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1776,7 +1865,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } else @@ -1791,7 +1879,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1809,7 +1897,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1843,6 +1930,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -1854,8 +1950,8 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b0); - uint64_t scrut0 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -1866,8 +1962,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -1877,8 +1972,12 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1906,7 +2005,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1924,7 +2023,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( (uint64_t)cipher_len, scratch_b1, 
tag); - uint64_t c0 = scrut2; c = c0; } else @@ -1939,7 +2037,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1957,7 +2055,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1989,6 +2086,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -2000,8 +2106,8 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b0); - uint64_t scrut0 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -2012,8 +2118,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -2023,8 +2128,12 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -2052,7 +2161,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -2070,7 +2179,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } else @@ -2085,7 +2193,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -2103,7 +2211,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -2214,8 +2321,7 @@ Cleanup and free the AEAD state. 
*/ void EverCrypt_AEAD_free(EverCrypt_AEAD_state_s *s) { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; KRML_HOST_FREE(ek); KRML_HOST_FREE(s); } diff --git a/src/EverCrypt_AutoConfig2.c b/src/EverCrypt_AutoConfig2.c index fe93ef8a..b549d020 100644 --- a/src/EverCrypt_AutoConfig2.c +++ b/src/EverCrypt_AutoConfig2.c @@ -113,75 +113,59 @@ void EverCrypt_AutoConfig2_recall(void) void EverCrypt_AutoConfig2_init(void) { #if HACL_CAN_COMPILE_VALE - uint64_t scrut = check_aesni(); - if (scrut != (uint64_t)0U) + if (check_aesni() != (uint64_t)0U) { cpu_has_aesni[0U] = true; cpu_has_pclmulqdq[0U] = true; } - uint64_t scrut0 = check_sha(); - if (scrut0 != (uint64_t)0U) + if (check_sha() != (uint64_t)0U) { cpu_has_shaext[0U] = true; } - uint64_t scrut1 = check_adx_bmi2(); - if (scrut1 != (uint64_t)0U) + if (check_adx_bmi2() != (uint64_t)0U) { cpu_has_bmi2[0U] = true; cpu_has_adx[0U] = true; } - uint64_t scrut2 = check_avx(); - if (scrut2 != (uint64_t)0U) + if (check_avx() != (uint64_t)0U) { - uint64_t scrut3 = check_osxsave(); - if (scrut3 != (uint64_t)0U) + if (check_osxsave() != (uint64_t)0U) { - uint64_t scrut4 = check_avx_xcr0(); - if (scrut4 != (uint64_t)0U) + if (check_avx_xcr0() != (uint64_t)0U) { cpu_has_avx[0U] = true; } } } - uint64_t scrut3 = check_avx2(); - if (scrut3 != (uint64_t)0U) + if (check_avx2() != (uint64_t)0U) { - uint64_t scrut4 = check_osxsave(); - if (scrut4 != (uint64_t)0U) + if (check_osxsave() != (uint64_t)0U) { - uint64_t scrut5 = check_avx_xcr0(); - if (scrut5 != (uint64_t)0U) + if (check_avx_xcr0() != (uint64_t)0U) { cpu_has_avx2[0U] = true; } } } - uint64_t scrut4 = check_sse(); - if (scrut4 != (uint64_t)0U) + if (check_sse() != (uint64_t)0U) { cpu_has_sse[0U] = true; } - uint64_t scrut5 = check_movbe(); - if (scrut5 != (uint64_t)0U) + if (check_movbe() != (uint64_t)0U) { cpu_has_movbe[0U] = true; } - uint64_t scrut6 = check_rdrand(); - if (scrut6 != (uint64_t)0U) + if (check_rdrand() != (uint64_t)0U) { cpu_has_rdrand[0U] = true; } - uint64_t scrut7 = check_avx512(); - if (scrut7 != (uint64_t)0U) + if (check_avx512() != (uint64_t)0U) { - uint64_t scrut8 = check_osxsave(); - if (scrut8 != (uint64_t)0U) + if (check_osxsave() != (uint64_t)0U) { - uint64_t scrut9 = check_avx_xcr0(); - if (scrut9 != (uint64_t)0U) + if (check_avx_xcr0() != (uint64_t)0U) { - uint64_t scrut10 = check_avx512_xcr0(); - if (scrut10 != (uint64_t)0U) + if (check_avx512_xcr0() != (uint64_t)0U) { cpu_has_avx512[0U] = true; return; diff --git a/src/EverCrypt_DRBG.c b/src/EverCrypt_DRBG.c index f21313e9..13e517e5 100644 --- a/src/EverCrypt_DRBG.c +++ b/src/EverCrypt_DRBG.c @@ -92,6 +92,7 @@ EverCrypt_DRBG_uu___is_SHA1_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA1_s) { return true; @@ -105,6 +106,7 @@ EverCrypt_DRBG_uu___is_SHA2_256_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA2_256_s) { return true; @@ -118,6 +120,7 @@ EverCrypt_DRBG_uu___is_SHA2_384_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA2_384_s) { return true; @@ -131,6 +134,7 @@ EverCrypt_DRBG_uu___is_SHA2_512_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA2_512_s) { return true; diff --git a/src/EverCrypt_Hash.c b/src/EverCrypt_Hash.c index 914a105f..b88df9e2 100644 --- a/src/EverCrypt_Hash.c +++ b/src/EverCrypt_Hash.c @@ -399,7 +399,7 @@ void EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n) if 
(has_shaext && has_sse) { uint64_t n1 = (uint64_t)n; - uint64_t scrut = sha256_update(s, blocks, n1, k224_256); + KRML_HOST_IGNORE(sha256_update(s, blocks, n1, k224_256)); return; } Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s); @@ -2156,8 +2156,7 @@ Perform a run-time test to determine which algorithm was chosen for the given pi Spec_Hash_Definitions_hash_alg EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_hash_state *s) { - EverCrypt_Hash_Incremental_hash_state scrut = *s; - EverCrypt_Hash_state_s *block_state = scrut.block_state; + EverCrypt_Hash_state_s *block_state = (*s).block_state; return alg_of_state(block_state); } diff --git a/src/EverCrypt_Poly1305.c b/src/EverCrypt_Poly1305.c index 717b9527..454c0fce 100644 --- a/src/EverCrypt_Poly1305.c +++ b/src/EverCrypt_Poly1305.c @@ -28,8 +28,13 @@ #include "internal/Vale.h" #include "config.h" -static void poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key) +KRML_MAYBE_UNUSED static void +poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key) { + KRML_HOST_IGNORE(dst); + KRML_HOST_IGNORE(src); + KRML_HOST_IGNORE(len); + KRML_HOST_IGNORE(key); #if HACL_CAN_COMPILE_VALE uint8_t ctx[192U] = { 0U }; memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t)); @@ -38,19 +43,16 @@ static void poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key uint8_t tmp[16U] = { 0U }; if (n_extra == (uint32_t)0U) { - uint64_t scrut = x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U); - KRML_HOST_IGNORE((void *)(uint8_t)0U); + KRML_HOST_IGNORE(x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U)); } else { uint32_t len16 = n_blocks * (uint32_t)16U; uint8_t *src16 = src; memcpy(tmp, src + len16, n_extra * sizeof (uint8_t)); - uint64_t scrut = x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U); - KRML_HOST_IGNORE((void *)(uint8_t)0U); + KRML_HOST_IGNORE(x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U)); memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t)); - uint64_t scrut0 = x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U); - KRML_HOST_IGNORE((void *)(uint8_t)0U); + KRML_HOST_IGNORE(x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U)); } memcpy(dst, ctx, (uint32_t)16U * sizeof (uint8_t)); #endif diff --git a/src/Hacl_Chacha20_Vec128.c b/src/Hacl_Chacha20_Vec128.c index ed112654..1e0c4ec1 100644 --- a/src/Hacl_Chacha20_Vec128.c +++ b/src/Hacl_Chacha20_Vec128.c @@ -370,9 +370,8 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)256U; - uint8_t *uu____3 = text + nb * (uint32_t)256U; uint8_t plain[256U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)256U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; chacha20_core_128(k, ctx, nb); Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; @@ -676,9 +675,8 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)256U; - uint8_t *uu____3 = cipher + nb * (uint32_t)256U; uint8_t plain[256U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)256U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; chacha20_core_128(k, ctx, nb); Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; diff --git a/src/Hacl_Chacha20_Vec256.c b/src/Hacl_Chacha20_Vec256.c index 
2df300b6..620f5040 100644 --- a/src/Hacl_Chacha20_Vec256.c +++ b/src/Hacl_Chacha20_Vec256.c @@ -470,9 +470,8 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)512U; - uint8_t *uu____3 = text + nb * (uint32_t)512U; uint8_t plain[512U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)512U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; chacha20_core_256(k, ctx, nb); Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; @@ -968,9 +967,8 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)512U; - uint8_t *uu____3 = cipher + nb * (uint32_t)512U; uint8_t plain[512U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)512U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; chacha20_core_256(k, ctx, nb); Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; diff --git a/src/Hacl_Chacha20_Vec32.c b/src/Hacl_Chacha20_Vec32.c index 6f137f39..2bf4764c 100644 --- a/src/Hacl_Chacha20_Vec32.c +++ b/src/Hacl_Chacha20_Vec32.c @@ -229,9 +229,8 @@ Hacl_Chacha20_Vec32_chacha20_encrypt_32( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = text + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k[16U] = { 0U }; chacha20_core_32(k, ctx, nb); KRML_MAYBE_FOR16(i, @@ -279,9 +278,8 @@ Hacl_Chacha20_Vec32_chacha20_decrypt_32( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = cipher + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k[16U] = { 0U }; chacha20_core_32(k, ctx, nb); KRML_MAYBE_FOR16(i, diff --git a/src/Hacl_Curve25519_64.c b/src/Hacl_Curve25519_64.c index 526fbd22..fb0974fe 100644 --- a/src/Hacl_Curve25519_64.c +++ b/src/Hacl_Curve25519_64.c @@ -35,7 +35,7 @@ static inline void add_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2) #if HACL_CAN_COMPILE_INLINE_ASM add_scalar(out, f1, f2); #else - uint64_t uu____0 = add_scalar_e(out, f1, f2); + KRML_HOST_IGNORE(add_scalar_e(out, f1, f2)); #endif } @@ -44,7 +44,7 @@ static inline void fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2) #if HACL_CAN_COMPILE_INLINE_ASM fadd(out, f1, f2); #else - uint64_t uu____0 = fadd_e(out, f1, f2); + KRML_HOST_IGNORE(fadd_e(out, f1, f2)); #endif } @@ -53,7 +53,7 @@ static inline void fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2) #if HACL_CAN_COMPILE_INLINE_ASM fsub(out, f1, f2); #else - uint64_t uu____0 = fsub_e(out, f1, f2); + KRML_HOST_IGNORE(fsub_e(out, f1, f2)); #endif } @@ -62,7 +62,7 @@ static inline void fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tm #if HACL_CAN_COMPILE_INLINE_ASM fmul(out, f1, f2, tmp); #else - uint64_t uu____0 = fmul_e(tmp, f1, out, f2); + KRML_HOST_IGNORE(fmul_e(tmp, f1, out, f2)); #endif } @@ -71,7 +71,7 @@ static inline void fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *t #if HACL_CAN_COMPILE_INLINE_ASM fmul2(out, f1, f2, tmp); #else - uint64_t uu____0 = fmul2_e(tmp, f1, out, f2); + KRML_HOST_IGNORE(fmul2_e(tmp, f1, out, f2)); #endif } @@ -80,7 +80,7 @@ static inline void fmul_scalar0(uint64_t *out, 
uint64_t *f1, uint64_t f2) #if HACL_CAN_COMPILE_INLINE_ASM fmul_scalar(out, f1, f2); #else - uint64_t uu____0 = fmul_scalar_e(out, f1, f2); + KRML_HOST_IGNORE(fmul_scalar_e(out, f1, f2)); #endif } @@ -89,7 +89,7 @@ static inline void fsqr0(uint64_t *out, uint64_t *f1, uint64_t *tmp) #if HACL_CAN_COMPILE_INLINE_ASM fsqr(out, f1, tmp); #else - uint64_t uu____0 = fsqr_e(tmp, f1, out); + KRML_HOST_IGNORE(fsqr_e(tmp, f1, out)); #endif } @@ -98,7 +98,7 @@ static inline void fsqr20(uint64_t *out, uint64_t *f, uint64_t *tmp) #if HACL_CAN_COMPILE_INLINE_ASM fsqr2(out, f, tmp); #else - uint64_t uu____0 = fsqr2_e(tmp, f, out); + KRML_HOST_IGNORE(fsqr2_e(tmp, f, out)); #endif } @@ -107,7 +107,7 @@ static inline void cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2) #if HACL_CAN_COMPILE_INLINE_ASM cswap2(bit, p1, p2); #else - uint64_t uu____0 = cswap2_e(bit, p1, p2); + KRML_HOST_IGNORE(cswap2_e(bit, p1, p2)); #endif } diff --git a/src/Hacl_Ed25519.c b/src/Hacl_Ed25519.c index 9d7c3bd4..f9881e91 100644 --- a/src/Hacl_Ed25519.c +++ b/src/Hacl_Ed25519.c @@ -711,65 +711,53 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) FStar_UInt128_uint128 c00 = carry0; FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U); - uint64_t - t100 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z11, c00)) - & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c10 = carry1; FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U); - uint64_t - t101 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z21, c10)) - & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c20 = carry2; FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U); - uint64_t - t102 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z31, c20)) - & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c30 = carry3; FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U); uint64_t - t103 = + t100 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c40 = carry4; - uint64_t t410 = t103; + uint64_t t410 = t100; FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U); uint64_t - t104 = + t101 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c5 = carry5; - uint64_t t51 = t104; + uint64_t t51 = t101; FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U); uint64_t - t105 = + t102 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c6 = carry6; - uint64_t t61 = t105; + uint64_t t61 = t102; FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U); uint64_t - t106 = + t103 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c7 = carry7; - uint64_t t71 = t106; + uint64_t t71 = t103; FStar_UInt128_uint128 carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U); uint64_t - t107 = + t104 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c8 = carry8; - uint64_t t81 = t107; + uint64_t t81 = t104; uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8); 
uint64_t qmu4_ = t410; uint64_t qmu5_ = t51; @@ -818,19 +806,19 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1); FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0); FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U); - uint64_t t108 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU; + uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c0 = carry9; - uint64_t t010 = t108; + uint64_t t010 = t105; FStar_UInt128_uint128 carry10 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0), (uint32_t)56U); uint64_t - t109 = + t106 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c11 = carry10; - uint64_t t110 = t109; + uint64_t t110 = t106; FStar_UInt128_uint128 carry11 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, @@ -839,14 +827,14 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) c11), (uint32_t)56U); uint64_t - t1010 = + t107 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, xy11), xy20), c11)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c21 = carry11; - uint64_t t210 = t1010; + uint64_t t210 = t107; FStar_UInt128_uint128 carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, @@ -856,7 +844,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) c21), (uint32_t)56U); uint64_t - t1011 = + t108 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, xy12), xy21), @@ -864,7 +852,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) c21)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c31 = carry; - uint64_t t310 = t1011; + uint64_t t310 = t108; uint64_t t411 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy04, @@ -880,24 +868,24 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) uint64_t qmul3 = t310; uint64_t qmul4 = t411; uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U; - uint64_t t1012 = (b5 << (uint32_t)56U) + r0 - qmul0; + uint64_t t109 = (b5 << (uint32_t)56U) + r0 - qmul0; uint64_t c1 = b5; - uint64_t t011 = t1012; + uint64_t t011 = t109; uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U; - uint64_t t1013 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1); + uint64_t t1010 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1); uint64_t c2 = b6; - uint64_t t111 = t1013; + uint64_t t111 = t1010; uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U; - uint64_t t1014 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2); + uint64_t t1011 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2); uint64_t c3 = b7; - uint64_t t211 = t1014; + uint64_t t211 = t1011; uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U; - uint64_t t1015 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3); + uint64_t t1012 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3); uint64_t c4 = b8; - uint64_t t311 = t1015; + uint64_t t311 = t1012; uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U; - uint64_t t1016 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4); - uint64_t t412 = t1016; + uint64_t t1013 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4); + 
uint64_t t412 = t1013; uint64_t s0 = t011; uint64_t s1 = t111; uint64_t s2 = t211; @@ -914,21 +902,21 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) uint64_t y3 = m31; uint64_t y4 = m41; uint64_t b10 = (s0 - y0) >> (uint32_t)63U; - uint64_t t1017 = (b10 << (uint32_t)56U) + s0 - y0; + uint64_t t1014 = (b10 << (uint32_t)56U) + s0 - y0; uint64_t b0 = b10; - uint64_t t01 = t1017; + uint64_t t01 = t1014; uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U; - uint64_t t1018 = (b11 << (uint32_t)56U) + s1 - (y1 + b0); + uint64_t t1015 = (b11 << (uint32_t)56U) + s1 - (y1 + b0); uint64_t b1 = b11; - uint64_t t11 = t1018; + uint64_t t11 = t1015; uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U; - uint64_t t1019 = (b12 << (uint32_t)56U) + s2 - (y2 + b1); + uint64_t t1016 = (b12 << (uint32_t)56U) + s2 - (y2 + b1); uint64_t b2 = b12; - uint64_t t21 = t1019; + uint64_t t21 = t1016; uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U; - uint64_t t1020 = (b13 << (uint32_t)56U) + s3 - (y3 + b2); + uint64_t t1017 = (b13 << (uint32_t)56U) + s3 - (y3 + b2); uint64_t b3 = b13; - uint64_t t31 = t1020; + uint64_t t31 = t1017; uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U; uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3); uint64_t b4 = b; diff --git a/src/Hacl_FFDHE.c b/src/Hacl_FFDHE.c index 78aaaab6..9cf2ddfb 100644 --- a/src/Hacl_FFDHE.c +++ b/src/Hacl_FFDHE.c @@ -127,7 +127,6 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui memset(p_n1, 0U, nLen * sizeof (uint64_t)); uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, p_n[0U], (uint64_t)1U, p_n1); - uint64_t c1; if ((uint32_t)1U < nLen) { uint64_t *a1 = p_n + (uint32_t)1U; @@ -159,12 +158,12 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i); } - uint64_t c10 = c; - c1 = c10; + uint64_t c1 = c; + KRML_HOST_IGNORE(c1); } else { - c1 = c0; + KRML_HOST_IGNORE(c0); } KRML_CHECK_SIZE(sizeof (uint64_t), nLen); uint64_t b2[nLen]; diff --git a/src/Hacl_Frodo_KEM.c b/src/Hacl_Frodo_KEM.c index 13db363a..4265ac0e 100644 --- a/src/Hacl_Frodo_KEM.c +++ b/src/Hacl_Frodo_KEM.c @@ -30,6 +30,6 @@ void randombytes_(uint32_t len, uint8_t *res) { - bool b = Lib_RandomBuffer_System_randombytes(res, len); + KRML_HOST_IGNORE(Lib_RandomBuffer_System_randombytes(res, len)); } diff --git a/src/Hacl_HMAC_DRBG.c b/src/Hacl_HMAC_DRBG.c index 181a8ef4..0a09aaed 100644 --- a/src/Hacl_HMAC_DRBG.c +++ b/src/Hacl_HMAC_DRBG.c @@ -71,6 +71,8 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a) bool Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee) { + KRML_HOST_IGNORE(a); + KRML_HOST_IGNORE(projectee); return true; } @@ -1084,6 +1086,7 @@ Hacl_HMAC_DRBG_generate( void Hacl_HMAC_DRBG_free(Spec_Hash_Definitions_hash_alg uu___, Hacl_HMAC_DRBG_state s) { + KRML_HOST_IGNORE(uu___); uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; diff --git a/src/Hacl_Hash_Blake2.c b/src/Hacl_Hash_Blake2.c index 194e7157..aecc6165 100644 --- a/src/Hacl_Hash_Blake2.c +++ b/src/Hacl_Hash_Blake2.c @@ -545,6 +545,7 @@ Hacl_Blake2b_32_blake2b_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { FStar_UInt128_uint128 @@ -1192,6 +1193,7 @@ Hacl_Blake2s_32_blake2s_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { uint64_t totlen = prev + 
(uint64_t)((i + (uint32_t)1U) * (uint32_t)64U); diff --git a/src/Hacl_Hash_Blake2b_256.c b/src/Hacl_Hash_Blake2b_256.c index d0df7cd8..b37ffc5f 100644 --- a/src/Hacl_Hash_Blake2b_256.c +++ b/src/Hacl_Hash_Blake2b_256.c @@ -268,6 +268,7 @@ Hacl_Blake2b_256_blake2b_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { FStar_UInt128_uint128 diff --git a/src/Hacl_Hash_Blake2s_128.c b/src/Hacl_Hash_Blake2s_128.c index 5bf06711..86c4f030 100644 --- a/src/Hacl_Hash_Blake2s_128.c +++ b/src/Hacl_Hash_Blake2s_128.c @@ -268,6 +268,7 @@ Hacl_Blake2s_128_blake2s_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U); diff --git a/src/Hacl_Hash_MD5.c b/src/Hacl_Hash_MD5.c index 1b376960..222ac824 100644 --- a/src/Hacl_Hash_MD5.c +++ b/src/Hacl_Hash_MD5.c @@ -1218,7 +1218,6 @@ void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Hash_Core_MD5_legacy_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/Hacl_Hash_SHA1.c b/src/Hacl_Hash_SHA1.c index 80edc004..5ecb3c0b 100644 --- a/src/Hacl_Hash_SHA1.c +++ b/src/Hacl_Hash_SHA1.c @@ -254,7 +254,6 @@ void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Hash_Core_SHA1_legacy_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/Hacl_Hash_SHA2.c b/src/Hacl_Hash_SHA2.c index 46fde83f..c93c3616 100644 --- a/src/Hacl_Hash_SHA2.c +++ b/src/Hacl_Hash_SHA2.c @@ -537,7 +537,6 @@ void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha256_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -836,7 +835,6 @@ void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha224_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -962,7 +960,6 @@ void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s) Hacl_Streaming_MD_state_64 scrut = *s; uint8_t *buf = scrut.buf; uint64_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha512_init(block_state); Hacl_Streaming_MD_state_64 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -1262,7 +1259,6 @@ void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s) Hacl_Streaming_MD_state_64 scrut = *s; uint8_t *buf = scrut.buf; uint64_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha384_init(block_state); Hacl_Streaming_MD_state_64 tmp = { .block_state = block_state, .buf = buf, .total_len = 
(uint64_t)(uint32_t)0U }; diff --git a/src/Hacl_Hash_SHA3.c b/src/Hacl_Hash_SHA3.c index 5f4707f4..19d13b1b 100644 --- a/src/Hacl_Hash_SHA3.c +++ b/src/Hacl_Hash_SHA3.c @@ -125,10 +125,9 @@ Hacl_Hash_SHA3_update_last_sha3( if (input_len == len) { Hacl_Impl_SHA3_absorb_inner(len, input, s); - uint8_t *uu____0 = input + input_len; uint8_t lastBlock_[200U] = { 0U }; uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, uu____0, (uint32_t)0U * sizeof (uint8_t)); + memcpy(lastBlock, input + input_len, (uint32_t)0U * sizeof (uint8_t)); lastBlock[0U] = suffix; Hacl_Impl_SHA3_loadState(len, lastBlock, s); if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U) @@ -167,8 +166,7 @@ hash_buf2; Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s) { - Hacl_Streaming_Keccak_state scrut = *s; - Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state; + Hacl_Streaming_Keccak_hash_buf block_state = (*s).block_state; return block_state.fst; } @@ -809,6 +807,7 @@ Hacl_Impl_SHA3_keccak( uint8_t *output ) { + KRML_HOST_IGNORE(capacity); uint32_t rateInBytes = rate / (uint32_t)8U; uint64_t s[25U] = { 0U }; absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix); diff --git a/src/Hacl_K256_ECDSA.c b/src/Hacl_K256_ECDSA.c index fb53f3fd..2ffc1060 100644 --- a/src/Hacl_K256_ECDSA.c +++ b/src/Hacl_K256_ECDSA.c @@ -498,7 +498,7 @@ mul_pow2_256_minus_q_add( uint64_t r = c; tmp[len + i0] = r;); memcpy(res + (uint32_t)2U, a, len * sizeof (uint64_t)); - uint64_t uu____0 = bn_add(resLen, res, len + (uint32_t)2U, tmp, res); + KRML_HOST_IGNORE(bn_add(resLen, res, len + (uint32_t)2U, tmp, res)); uint64_t c = bn_add(resLen, res, (uint32_t)4U, e, res); return c; } @@ -514,15 +514,23 @@ static inline void modq(uint64_t *out, uint64_t *a) uint64_t *t01 = tmp; uint64_t m[7U] = { 0U }; uint64_t p[5U] = { 0U }; - uint64_t - c0 = mul_pow2_256_minus_q_add((uint32_t)4U, (uint32_t)7U, t01, a + (uint32_t)4U, a, m); - uint64_t - c10 = mul_pow2_256_minus_q_add((uint32_t)3U, (uint32_t)5U, t01, m + (uint32_t)4U, m, p); + KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)4U, + (uint32_t)7U, + t01, + a + (uint32_t)4U, + a, + m)); + KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)3U, + (uint32_t)5U, + t01, + m + (uint32_t)4U, + m, + p)); uint64_t c2 = mul_pow2_256_minus_q_add((uint32_t)1U, (uint32_t)4U, t01, p + (uint32_t)4U, p, r); - uint64_t c00 = c2; + uint64_t c0 = c2; uint64_t c1 = add4(r, tmp, out); - uint64_t mask = (uint64_t)0U - (c00 + c1); + uint64_t mask = (uint64_t)0U - (c0 + c1); KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, @@ -612,7 +620,7 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b) uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);); uint64_t c1 = c; - uint64_t uu____0 = c1; + KRML_HOST_IGNORE(c1); uint64_t flag = l[5U] >> (uint32_t)63U; uint64_t mask = (uint64_t)0U - flag; KRML_MAYBE_FOR4(i, @@ -1223,6 +1231,7 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar) (uint64_t)118285133003718U, (uint64_t)434519962075150U, (uint64_t)1114612377498854U, (uint64_t)3488596944003813U, (uint64_t)450716531072892U, (uint64_t)66044973203836U }; + KRML_HOST_IGNORE(q2); uint64_t q3[15U] = { @@ -1232,6 +1241,7 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar) (uint64_t)265969268774814U, (uint64_t)1913228635640715U, (uint64_t)2831959046949342U, (uint64_t)888030405442963U, (uint64_t)1817092932985033U, (uint64_t)101515844997121U }; + 
KRML_HOST_IGNORE(q3); uint64_t q4[15U] = { @@ -1241,6 +1251,7 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar) (uint64_t)12245672982162U, (uint64_t)2119364213800870U, (uint64_t)2034960311715107U, (uint64_t)3172697815804487U, (uint64_t)4185144850224160U, (uint64_t)2792055915674U }; + KRML_HOST_IGNORE(q4); uint64_t *r1 = scalar; uint64_t *r2 = scalar + (uint32_t)1U; uint64_t *r3 = scalar + (uint32_t)2U; @@ -1605,6 +1616,7 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg( ) { uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U }; + KRML_HOST_IGNORE(oneq); uint64_t rsdk_q[16U] = { 0U }; uint64_t *r_q = rsdk_q; uint64_t *s_q = rsdk_q + (uint32_t)4U; diff --git a/src/Hacl_RSAPSS.c b/src/Hacl_RSAPSS.c index 19d4e5b4..ceb9a6f0 100644 --- a/src/Hacl_RSAPSS.c +++ b/src/Hacl_RSAPSS.c @@ -404,9 +404,9 @@ load_skey( Sign a message `msg` and write the signature to `sgnt`. @param a Hash algorithm to use. Allowed values for `a` are ... - * Spec_Hash_Definitions_SHA2_256, - * Spec_Hash_Definitions_SHA2_384, and - * Spec_Hash_Definitions_SHA2_512. + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. @@ -518,7 +518,10 @@ Hacl_RSAPSS_rsapss_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param pkey Pointer to public key created by `Hacl_RSAPSS_new_rsapss_load_pkey`. @@ -637,10 +640,10 @@ Load a public key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. -@return Returns an allocated public key. Note: caller must take care to `free()` the created key. +@return Returns an allocated public key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. */ uint64_t *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb) @@ -707,11 +710,11 @@ Load a secret key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. -@return Returns an allocated secret key. 
Note: caller must take care to `free()` the created key. +@return Returns an allocated secret key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. */ uint64_t *Hacl_RSAPSS_new_rsapss_load_skey( @@ -804,13 +807,16 @@ uint64_t /** Sign a message `msg` and write the signature to `sgnt`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param salt Pointer to `saltLen` bytes where the salt is read from. @param msgLen Length of message. @@ -873,11 +879,14 @@ Hacl_RSAPSS_rsapss_skey_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param sgntLen Length of signature. @param sgnt Pointer to `sgntLen` bytes where the signature is read from. 
diff --git a/src/Hacl_Salsa20.c b/src/Hacl_Salsa20.c index e157d5ef..2758f8a4 100644 --- a/src/Hacl_Salsa20.c +++ b/src/Hacl_Salsa20.c @@ -181,6 +181,7 @@ salsa20_encrypt( memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t)); ctx[15U] = (uint32_t)0x6b206574U; uint32_t k[16U] = { 0U }; + KRML_HOST_IGNORE(k); uint32_t rem = len % (uint32_t)64U; uint32_t nb = len / (uint32_t)64U; uint32_t rem1 = len % (uint32_t)64U; @@ -217,9 +218,8 @@ salsa20_encrypt( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = text + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k1[16U] = { 0U }; salsa20_core(k1, ctx, nb); uint32_t bl[16U] = { 0U }; @@ -294,6 +294,7 @@ salsa20_decrypt( memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t)); ctx[15U] = (uint32_t)0x6b206574U; uint32_t k[16U] = { 0U }; + KRML_HOST_IGNORE(k); uint32_t rem = len % (uint32_t)64U; uint32_t nb = len / (uint32_t)64U; uint32_t rem1 = len % (uint32_t)64U; @@ -330,9 +331,8 @@ salsa20_decrypt( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = cipher + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k1[16U] = { 0U }; salsa20_core(k1, ctx, nb); uint32_t bl[16U] = { 0U }; diff --git a/src/Hacl_Streaming_Blake2.c b/src/Hacl_Streaming_Blake2.c index 4faa859e..948d56c2 100644 --- a/src/Hacl_Streaming_Blake2.c +++ b/src/Hacl_Streaming_Blake2.c @@ -54,7 +54,6 @@ void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_ Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); Hacl_Streaming_Blake2_blake2s_32_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -354,7 +353,6 @@ void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_ Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); Hacl_Streaming_Blake2_blake2b_32_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/Hacl_Streaming_Blake2b_256.c b/src/Hacl_Streaming_Blake2b_256.c index d2df234a..bdb5433f 100644 --- a/src/Hacl_Streaming_Blake2b_256.c +++ b/src/Hacl_Streaming_Blake2b_256.c @@ -66,7 +66,6 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init( Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); Hacl_Streaming_Blake2b_256_blake2b_256_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/Hacl_Streaming_Blake2s_128.c b/src/Hacl_Streaming_Blake2s_128.c index eaace7ce..f97bf5d0 100644 --- a/src/Hacl_Streaming_Blake2s_128.c +++ b/src/Hacl_Streaming_Blake2s_128.c @@ -66,7 +66,6 
@@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init( Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); Hacl_Streaming_Blake2s_128_blake2s_128_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/Hacl_Streaming_Poly1305_128.c b/src/Hacl_Streaming_Poly1305_128.c index c752cfb0..c3f7c19a 100644 --- a/src/Hacl_Streaming_Poly1305_128.c +++ b/src/Hacl_Streaming_Poly1305_128.c @@ -58,7 +58,6 @@ Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly130 uint8_t *k_ = scrut.p_key; uint8_t *buf = scrut.buf; Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Poly1305_128_poly1305_init(block_state, k); memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); uint8_t *k_1 = k_; @@ -312,7 +311,7 @@ Hacl_Streaming_Poly1305_128_finish( { ite1 = r % (uint32_t)16U; } - uint64_t prev_len_last = total_len - (uint64_t)ite1; + KRML_HOST_IGNORE(total_len - (uint64_t)ite1); uint32_t ite2; if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) { diff --git a/src/Hacl_Streaming_Poly1305_256.c b/src/Hacl_Streaming_Poly1305_256.c index c1915ed9..e56275a4 100644 --- a/src/Hacl_Streaming_Poly1305_256.c +++ b/src/Hacl_Streaming_Poly1305_256.c @@ -58,7 +58,6 @@ Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly130 uint8_t *k_ = scrut.p_key; uint8_t *buf = scrut.buf; Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Poly1305_256_poly1305_init(block_state, k); memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); uint8_t *k_1 = k_; @@ -312,7 +311,7 @@ Hacl_Streaming_Poly1305_256_finish( { ite1 = r % (uint32_t)16U; } - uint64_t prev_len_last = total_len - (uint64_t)ite1; + KRML_HOST_IGNORE(total_len - (uint64_t)ite1); uint32_t ite2; if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) { diff --git a/src/Hacl_Streaming_Poly1305_32.c b/src/Hacl_Streaming_Poly1305_32.c index 89852727..249a622f 100644 --- a/src/Hacl_Streaming_Poly1305_32.c +++ b/src/Hacl_Streaming_Poly1305_32.c @@ -53,7 +53,6 @@ Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_ uint8_t *k_ = scrut.p_key; uint8_t *buf = scrut.buf; uint64_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Poly1305_32_poly1305_init(block_state, k); memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); uint8_t *k_1 = k_; diff --git a/src/msvc/EverCrypt_AEAD.c b/src/msvc/EverCrypt_AEAD.c index 564dbc2e..d3a4ffbe 100644 --- a/src/msvc/EverCrypt_AEAD.c +++ b/src/msvc/EverCrypt_AEAD.c @@ -46,6 +46,8 @@ The state may be reused as many times as desired. */ bool EverCrypt_AEAD_uu___is_Ek(Spec_Agile_AEAD_alg a, EverCrypt_AEAD_state_s projectee) { + KRML_HOST_IGNORE(a); + KRML_HOST_IGNORE(projectee); return true; } @@ -58,8 +60,7 @@ Return the algorithm used in the AEAD state. 
*/ Spec_Agile_AEAD_alg EverCrypt_AEAD_alg_of_state(EverCrypt_AEAD_state_s *s) { - EverCrypt_AEAD_state_s scrut = *s; - Spec_Cipher_Expansion_impl impl = scrut.impl; + Spec_Cipher_Expansion_impl impl = (*s).impl; switch (impl) { case Spec_Cipher_Expansion_Hacl_CHACHA20: @@ -97,6 +98,8 @@ create_in_chacha20_poly1305(EverCrypt_AEAD_state_s **dst, uint8_t *k) static EverCrypt_Error_error_code create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) { + KRML_HOST_IGNORE(dst); + KRML_HOST_IGNORE(k); #if HACL_CAN_COMPILE_VALE bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); @@ -108,8 +111,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)480U, sizeof (uint8_t)); uint8_t *keys_b = ek; uint8_t *hkeys_b = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b); - uint64_t scrut0 = aes128_keyhash_init(keys_b, hkeys_b); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b, hkeys_b)); EverCrypt_AEAD_state_s *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s)); p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }); @@ -125,6 +128,8 @@ create_in_aes128_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) static EverCrypt_Error_error_code create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) { + KRML_HOST_IGNORE(dst); + KRML_HOST_IGNORE(k); #if HACL_CAN_COMPILE_VALE bool has_aesni = EverCrypt_AutoConfig2_has_aesni(); bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); @@ -136,8 +141,8 @@ create_in_aes256_gcm(EverCrypt_AEAD_state_s **dst, uint8_t *k) uint8_t *ek = (uint8_t *)KRML_HOST_CALLOC((uint32_t)544U, sizeof (uint8_t)); uint8_t *keys_b = ek; uint8_t *hkeys_b = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b); - uint64_t scrut0 = aes256_keyhash_init(keys_b, hkeys_b); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b, hkeys_b)); EverCrypt_AEAD_state_s *p = (EverCrypt_AEAD_state_s *)KRML_HOST_MALLOC(sizeof (EverCrypt_AEAD_state_s)); p[0U] = ((EverCrypt_AEAD_state_s){ .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }); @@ -203,6 +208,15 @@ encrypt_aes128_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(s); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -212,8 +226,7 @@ encrypt_aes128_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)304U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -223,8 +236,12 @@ encrypt_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -250,9 +267,7 @@ encrypt_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; 
uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -268,7 +283,7 @@ encrypt_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -281,9 +296,7 @@ encrypt_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -299,7 +312,7 @@ encrypt_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, @@ -327,6 +340,15 @@ encrypt_aes256_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(s); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -336,8 +358,7 @@ encrypt_aes256_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)368U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -347,8 +368,12 @@ encrypt_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -374,9 +399,7 @@ encrypt_aes256_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut0 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -392,7 +415,7 @@ encrypt_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -405,9 +428,7 @@ encrypt_aes256_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut0 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -423,7 +444,7 @@ encrypt_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, @@ -525,27 +546,34 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut0 = aes128_key_expansion(k, keys_b0); - uint64_t scrut1 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p 
= { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -555,8 +583,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -582,9 +614,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -600,7 +630,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -613,9 +643,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -631,12 +659,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; #else @@ -669,27 +697,34 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut0 = aes256_key_expansion(k, keys_b0); - uint64_t scrut1 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; 
uint8_t *keys_b = ek1; @@ -699,8 +734,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -726,9 +765,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -744,7 +781,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -757,9 +794,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -775,12 +810,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm_no_check( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; #else @@ -805,6 +840,15 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -816,23 +860,21 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut0 = aes128_key_expansion(k, keys_b0); - uint64_t scrut1 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -842,8 +884,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + 
KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -869,9 +915,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -887,7 +931,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -900,9 +944,7 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm128_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm128_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -918,12 +960,12 @@ EverCrypt_AEAD_encrypt_expand_aes128_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; } @@ -946,6 +988,15 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint8_t *tag ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(plain); + KRML_HOST_IGNORE(plain_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(tag); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -957,23 +1008,21 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut0 = aes256_key_expansion(k, keys_b0); - uint64_t scrut1 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; - EverCrypt_Error_error_code r; if (s == NULL) { - r = EverCrypt_Error_InvalidKey; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidKey); } else if (iv_len == (uint32_t)0U) { - r = EverCrypt_Error_InvalidIVLength; + KRML_HOST_IGNORE(EverCrypt_Error_InvalidIVLength); } else { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -983,8 +1032,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1010,9 +1063,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128x6_ = len128x6 / 
(uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -1028,7 +1079,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } else { @@ -1041,9 +1092,7 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( uint64_t auth_num = (uint64_t)ad_len / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; - uint64_t - scrut2 = - gcm256_encrypt_opt(auth_b_, + KRML_HOST_IGNORE(gcm256_encrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, keys_b, @@ -1059,12 +1108,12 @@ EverCrypt_AEAD_encrypt_expand_aes256_gcm( inout_b, (uint64_t)plain_len, scratch_b1, - tag); + tag)); } memcpy(cipher + (uint32_t)(uint64_t)plain_len / (uint32_t)16U * (uint32_t)16U, inout_b, (uint32_t)(uint64_t)plain_len % (uint32_t)16U * sizeof (uint8_t)); - r = EverCrypt_Error_Success; + KRML_HOST_IGNORE(EverCrypt_Error_Success); } return EverCrypt_Error_Success; } @@ -1087,12 +1136,12 @@ EverCrypt_AEAD_encrypt_expand_chacha20_poly1305( uint8_t *tag ) { + KRML_HOST_IGNORE(iv_len); uint8_t ek[32U] = { 0U }; EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Hacl_CHACHA20, .ek = ek }; memcpy(ek, k, (uint32_t)32U * sizeof (uint8_t)); EverCrypt_AEAD_state_s *s = &p; - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek0 = scrut.ek; + uint8_t *ek0 = (*s).ek; EverCrypt_Chacha20Poly1305_aead_encrypt(ek0, iv, ad_len, ad, plain_len, plain, cipher, tag); return EverCrypt_Error_Success; } @@ -1173,6 +1222,15 @@ decrypt_aes128_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(s); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -1182,8 +1240,7 @@ decrypt_aes128_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)304U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -1193,8 +1250,12 @@ decrypt_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1222,7 +1283,7 @@ decrypt_aes128_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut0 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1240,7 +1301,6 @@ decrypt_aes128_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } else @@ -1255,7 +1315,7 @@ decrypt_aes128_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut0 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1273,7 +1333,6 @@ decrypt_aes128_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1307,6 +1366,15 @@ decrypt_aes256_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(s); + 
KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE if (s == NULL) { @@ -1316,8 +1384,7 @@ decrypt_aes256_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint8_t *scratch_b = ek + (uint32_t)368U; uint8_t *ek1 = ek; uint8_t *keys_b = ek1; @@ -1327,8 +1394,12 @@ decrypt_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1356,7 +1427,7 @@ decrypt_aes256_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut0 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1374,7 +1445,6 @@ decrypt_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } else @@ -1389,7 +1459,7 @@ decrypt_aes256_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut0 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1407,7 +1477,6 @@ decrypt_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut0; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1449,8 +1518,7 @@ decrypt_chacha20_poly1305( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; uint32_t r = EverCrypt_Chacha20Poly1305_aead_decrypt(ek, iv, ad_len, ad, cipher_len, dst, cipher, tag); if (r == (uint32_t)0U) @@ -1508,8 +1576,7 @@ EverCrypt_AEAD_decrypt( { return EverCrypt_Error_InvalidKey; } - EverCrypt_AEAD_state_s scrut = *s; - Spec_Cipher_Expansion_impl i = scrut.impl; + Spec_Cipher_Expansion_impl i = (*s).impl; switch (i) { case Spec_Cipher_Expansion_Vale_AES128: @@ -1553,12 +1620,21 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint8_t *dst ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b0); - uint64_t scrut0 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -1569,8 +1645,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -1580,8 +1655,12 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint32_t bytes_len = 
len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1609,7 +1688,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1627,7 +1706,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } else @@ -1642,7 +1720,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1660,7 +1738,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1702,12 +1779,21 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint8_t *dst ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b0); - uint64_t scrut0 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -1718,8 +1804,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -1729,8 +1814,12 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1758,7 +1847,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1776,7 +1865,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } else @@ -1791,7 +1879,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = 
(uint64_t)0U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1809,7 +1897,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm_no_check( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1843,6 +1930,15 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -1854,8 +1950,8 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint8_t ek[480U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + (uint32_t)176U; - uint64_t scrut = aes128_key_expansion(k, keys_b0); - uint64_t scrut0 = aes128_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes128_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes128_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES128, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -1866,8 +1962,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)304U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -1877,8 +1972,12 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -1906,7 +2005,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1924,7 +2023,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } else @@ -1939,7 +2037,7 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut2 = + c0 = gcm128_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -1957,7 +2055,6 @@ EverCrypt_AEAD_decrypt_expand_aes128_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -1989,6 +2086,15 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint8_t *dst ) { + KRML_HOST_IGNORE(k); + KRML_HOST_IGNORE(iv); + KRML_HOST_IGNORE(iv_len); + KRML_HOST_IGNORE(ad); + KRML_HOST_IGNORE(ad_len); + KRML_HOST_IGNORE(cipher); + KRML_HOST_IGNORE(cipher_len); + KRML_HOST_IGNORE(tag); + KRML_HOST_IGNORE(dst); #if HACL_CAN_COMPILE_VALE bool has_pclmulqdq = EverCrypt_AutoConfig2_has_pclmulqdq(); bool has_avx = EverCrypt_AutoConfig2_has_avx(); @@ -2000,8 +2106,8 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint8_t ek[544U] = { 0U }; uint8_t *keys_b0 = ek; uint8_t *hkeys_b0 = ek + 
(uint32_t)240U; - uint64_t scrut = aes256_key_expansion(k, keys_b0); - uint64_t scrut0 = aes256_keyhash_init(keys_b0, hkeys_b0); + KRML_HOST_IGNORE(aes256_key_expansion(k, keys_b0)); + KRML_HOST_IGNORE(aes256_keyhash_init(keys_b0, hkeys_b0)); EverCrypt_AEAD_state_s p = { .impl = Spec_Cipher_Expansion_Vale_AES256, .ek = ek }; EverCrypt_AEAD_state_s *s = &p; if (s == NULL) @@ -2012,8 +2118,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( { return EverCrypt_Error_InvalidIVLength; } - EverCrypt_AEAD_state_s scrut1 = *s; - uint8_t *ek0 = scrut1.ek; + uint8_t *ek0 = (*s).ek; uint8_t *scratch_b = ek0 + (uint32_t)368U; uint8_t *ek1 = ek0; uint8_t *keys_b = ek1; @@ -2023,8 +2128,12 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint32_t bytes_len = len * (uint32_t)16U; uint8_t *iv_b = iv; memcpy(tmp_iv, iv + bytes_len, iv_len % (uint32_t)16U * sizeof (uint8_t)); - uint64_t - uu____0 = compute_iv_stdcall(iv_b, (uint64_t)iv_len, (uint64_t)len, tmp_iv, tmp_iv, hkeys_b); + KRML_HOST_IGNORE(compute_iv_stdcall(iv_b, + (uint64_t)iv_len, + (uint64_t)len, + tmp_iv, + tmp_iv, + hkeys_b)); uint8_t *inout_b = scratch_b; uint8_t *abytes_b = scratch_b + (uint32_t)16U; uint8_t *scratch_b1 = scratch_b + (uint32_t)32U; @@ -2052,7 +2161,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint64_t len128x6_ = len128x6 / (uint64_t)16U; uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -2070,7 +2179,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } else @@ -2085,7 +2193,7 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( uint64_t len128_num_ = len128_num / (uint64_t)16U; uint64_t len128x6_ = (uint64_t)0U; uint64_t - scrut2 = + c0 = gcm256_decrypt_opt(auth_b_, (uint64_t)ad_len, auth_num, @@ -2103,7 +2211,6 @@ EverCrypt_AEAD_decrypt_expand_aes256_gcm( (uint64_t)cipher_len, scratch_b1, tag); - uint64_t c0 = scrut2; c = c0; } memcpy(dst + (uint32_t)(uint64_t)cipher_len / (uint32_t)16U * (uint32_t)16U, @@ -2214,8 +2321,7 @@ Cleanup and free the AEAD state. 
*/ void EverCrypt_AEAD_free(EverCrypt_AEAD_state_s *s) { - EverCrypt_AEAD_state_s scrut = *s; - uint8_t *ek = scrut.ek; + uint8_t *ek = (*s).ek; KRML_HOST_FREE(ek); KRML_HOST_FREE(s); } diff --git a/src/msvc/EverCrypt_AutoConfig2.c b/src/msvc/EverCrypt_AutoConfig2.c index fe93ef8a..b549d020 100644 --- a/src/msvc/EverCrypt_AutoConfig2.c +++ b/src/msvc/EverCrypt_AutoConfig2.c @@ -113,75 +113,59 @@ void EverCrypt_AutoConfig2_recall(void) void EverCrypt_AutoConfig2_init(void) { #if HACL_CAN_COMPILE_VALE - uint64_t scrut = check_aesni(); - if (scrut != (uint64_t)0U) + if (check_aesni() != (uint64_t)0U) { cpu_has_aesni[0U] = true; cpu_has_pclmulqdq[0U] = true; } - uint64_t scrut0 = check_sha(); - if (scrut0 != (uint64_t)0U) + if (check_sha() != (uint64_t)0U) { cpu_has_shaext[0U] = true; } - uint64_t scrut1 = check_adx_bmi2(); - if (scrut1 != (uint64_t)0U) + if (check_adx_bmi2() != (uint64_t)0U) { cpu_has_bmi2[0U] = true; cpu_has_adx[0U] = true; } - uint64_t scrut2 = check_avx(); - if (scrut2 != (uint64_t)0U) + if (check_avx() != (uint64_t)0U) { - uint64_t scrut3 = check_osxsave(); - if (scrut3 != (uint64_t)0U) + if (check_osxsave() != (uint64_t)0U) { - uint64_t scrut4 = check_avx_xcr0(); - if (scrut4 != (uint64_t)0U) + if (check_avx_xcr0() != (uint64_t)0U) { cpu_has_avx[0U] = true; } } } - uint64_t scrut3 = check_avx2(); - if (scrut3 != (uint64_t)0U) + if (check_avx2() != (uint64_t)0U) { - uint64_t scrut4 = check_osxsave(); - if (scrut4 != (uint64_t)0U) + if (check_osxsave() != (uint64_t)0U) { - uint64_t scrut5 = check_avx_xcr0(); - if (scrut5 != (uint64_t)0U) + if (check_avx_xcr0() != (uint64_t)0U) { cpu_has_avx2[0U] = true; } } } - uint64_t scrut4 = check_sse(); - if (scrut4 != (uint64_t)0U) + if (check_sse() != (uint64_t)0U) { cpu_has_sse[0U] = true; } - uint64_t scrut5 = check_movbe(); - if (scrut5 != (uint64_t)0U) + if (check_movbe() != (uint64_t)0U) { cpu_has_movbe[0U] = true; } - uint64_t scrut6 = check_rdrand(); - if (scrut6 != (uint64_t)0U) + if (check_rdrand() != (uint64_t)0U) { cpu_has_rdrand[0U] = true; } - uint64_t scrut7 = check_avx512(); - if (scrut7 != (uint64_t)0U) + if (check_avx512() != (uint64_t)0U) { - uint64_t scrut8 = check_osxsave(); - if (scrut8 != (uint64_t)0U) + if (check_osxsave() != (uint64_t)0U) { - uint64_t scrut9 = check_avx_xcr0(); - if (scrut9 != (uint64_t)0U) + if (check_avx_xcr0() != (uint64_t)0U) { - uint64_t scrut10 = check_avx512_xcr0(); - if (scrut10 != (uint64_t)0U) + if (check_avx512_xcr0() != (uint64_t)0U) { cpu_has_avx512[0U] = true; return; diff --git a/src/msvc/EverCrypt_DRBG.c b/src/msvc/EverCrypt_DRBG.c index 243d8eb4..9591823c 100644 --- a/src/msvc/EverCrypt_DRBG.c +++ b/src/msvc/EverCrypt_DRBG.c @@ -92,6 +92,7 @@ EverCrypt_DRBG_uu___is_SHA1_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA1_s) { return true; @@ -105,6 +106,7 @@ EverCrypt_DRBG_uu___is_SHA2_256_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA2_256_s) { return true; @@ -118,6 +120,7 @@ EverCrypt_DRBG_uu___is_SHA2_384_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA2_384_s) { return true; @@ -131,6 +134,7 @@ EverCrypt_DRBG_uu___is_SHA2_512_s( EverCrypt_DRBG_state_s projectee ) { + KRML_HOST_IGNORE(uu___); if (projectee.tag == SHA2_512_s) { return true; diff --git a/src/msvc/EverCrypt_Hash.c b/src/msvc/EverCrypt_Hash.c index 914a105f..b88df9e2 100644 --- a/src/msvc/EverCrypt_Hash.c +++ b/src/msvc/EverCrypt_Hash.c @@ -399,7 +399,7 @@ void 
EverCrypt_Hash_update_multi_256(uint32_t *s, uint8_t *blocks, uint32_t n) if (has_shaext && has_sse) { uint64_t n1 = (uint64_t)n; - uint64_t scrut = sha256_update(s, blocks, n1, k224_256); + KRML_HOST_IGNORE(sha256_update(s, blocks, n1, k224_256)); return; } Hacl_SHA2_Scalar32_sha256_update_nblocks(n * (uint32_t)64U, blocks, s); @@ -2156,8 +2156,7 @@ Perform a run-time test to determine which algorithm was chosen for the given pi Spec_Hash_Definitions_hash_alg EverCrypt_Hash_Incremental_alg_of_state(EverCrypt_Hash_Incremental_hash_state *s) { - EverCrypt_Hash_Incremental_hash_state scrut = *s; - EverCrypt_Hash_state_s *block_state = scrut.block_state; + EverCrypt_Hash_state_s *block_state = (*s).block_state; return alg_of_state(block_state); } diff --git a/src/msvc/EverCrypt_Poly1305.c b/src/msvc/EverCrypt_Poly1305.c index 717b9527..454c0fce 100644 --- a/src/msvc/EverCrypt_Poly1305.c +++ b/src/msvc/EverCrypt_Poly1305.c @@ -28,8 +28,13 @@ #include "internal/Vale.h" #include "config.h" -static void poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key) +KRML_MAYBE_UNUSED static void +poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key) { + KRML_HOST_IGNORE(dst); + KRML_HOST_IGNORE(src); + KRML_HOST_IGNORE(len); + KRML_HOST_IGNORE(key); #if HACL_CAN_COMPILE_VALE uint8_t ctx[192U] = { 0U }; memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t)); @@ -38,19 +43,16 @@ static void poly1305_vale(uint8_t *dst, uint8_t *src, uint32_t len, uint8_t *key uint8_t tmp[16U] = { 0U }; if (n_extra == (uint32_t)0U) { - uint64_t scrut = x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U); - KRML_HOST_IGNORE((void *)(uint8_t)0U); + KRML_HOST_IGNORE(x64_poly1305(ctx, src, (uint64_t)len, (uint64_t)1U)); } else { uint32_t len16 = n_blocks * (uint32_t)16U; uint8_t *src16 = src; memcpy(tmp, src + len16, n_extra * sizeof (uint8_t)); - uint64_t scrut = x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U); - KRML_HOST_IGNORE((void *)(uint8_t)0U); + KRML_HOST_IGNORE(x64_poly1305(ctx, src16, (uint64_t)len16, (uint64_t)0U)); memcpy(ctx + (uint32_t)24U, key, (uint32_t)32U * sizeof (uint8_t)); - uint64_t scrut0 = x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U); - KRML_HOST_IGNORE((void *)(uint8_t)0U); + KRML_HOST_IGNORE(x64_poly1305(ctx, tmp, (uint64_t)n_extra, (uint64_t)1U)); } memcpy(dst, ctx, (uint32_t)16U * sizeof (uint8_t)); #endif diff --git a/src/msvc/Hacl_Chacha20_Vec128.c b/src/msvc/Hacl_Chacha20_Vec128.c index ed112654..1e0c4ec1 100644 --- a/src/msvc/Hacl_Chacha20_Vec128.c +++ b/src/msvc/Hacl_Chacha20_Vec128.c @@ -370,9 +370,8 @@ Hacl_Chacha20_Vec128_chacha20_encrypt_128( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)256U; - uint8_t *uu____3 = text + nb * (uint32_t)256U; uint8_t plain[256U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)256U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; chacha20_core_128(k, ctx, nb); Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; @@ -676,9 +675,8 @@ Hacl_Chacha20_Vec128_chacha20_decrypt_128( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)256U; - uint8_t *uu____3 = cipher + nb * (uint32_t)256U; uint8_t plain[256U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)256U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(16) Lib_IntVector_Intrinsics_vec128 k[16U] KRML_POST_ALIGN(16) = { 0U }; chacha20_core_128(k, ctx, nb); 
Lib_IntVector_Intrinsics_vec128 st0 = k[0U]; diff --git a/src/msvc/Hacl_Chacha20_Vec256.c b/src/msvc/Hacl_Chacha20_Vec256.c index 2df300b6..620f5040 100644 --- a/src/msvc/Hacl_Chacha20_Vec256.c +++ b/src/msvc/Hacl_Chacha20_Vec256.c @@ -470,9 +470,8 @@ Hacl_Chacha20_Vec256_chacha20_encrypt_256( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)512U; - uint8_t *uu____3 = text + nb * (uint32_t)512U; uint8_t plain[512U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)512U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; chacha20_core_256(k, ctx, nb); Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; @@ -968,9 +967,8 @@ Hacl_Chacha20_Vec256_chacha20_decrypt_256( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)512U; - uint8_t *uu____3 = cipher + nb * (uint32_t)512U; uint8_t plain[512U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)512U, rem * sizeof (uint8_t)); KRML_PRE_ALIGN(32) Lib_IntVector_Intrinsics_vec256 k[16U] KRML_POST_ALIGN(32) = { 0U }; chacha20_core_256(k, ctx, nb); Lib_IntVector_Intrinsics_vec256 st0 = k[0U]; diff --git a/src/msvc/Hacl_Chacha20_Vec32.c b/src/msvc/Hacl_Chacha20_Vec32.c index 6f137f39..2bf4764c 100644 --- a/src/msvc/Hacl_Chacha20_Vec32.c +++ b/src/msvc/Hacl_Chacha20_Vec32.c @@ -229,9 +229,8 @@ Hacl_Chacha20_Vec32_chacha20_encrypt_32( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = text + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k[16U] = { 0U }; chacha20_core_32(k, ctx, nb); KRML_MAYBE_FOR16(i, @@ -279,9 +278,8 @@ Hacl_Chacha20_Vec32_chacha20_decrypt_32( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = cipher + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k[16U] = { 0U }; chacha20_core_32(k, ctx, nb); KRML_MAYBE_FOR16(i, diff --git a/src/msvc/Hacl_Curve25519_64.c b/src/msvc/Hacl_Curve25519_64.c index 526fbd22..fb0974fe 100644 --- a/src/msvc/Hacl_Curve25519_64.c +++ b/src/msvc/Hacl_Curve25519_64.c @@ -35,7 +35,7 @@ static inline void add_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2) #if HACL_CAN_COMPILE_INLINE_ASM add_scalar(out, f1, f2); #else - uint64_t uu____0 = add_scalar_e(out, f1, f2); + KRML_HOST_IGNORE(add_scalar_e(out, f1, f2)); #endif } @@ -44,7 +44,7 @@ static inline void fadd0(uint64_t *out, uint64_t *f1, uint64_t *f2) #if HACL_CAN_COMPILE_INLINE_ASM fadd(out, f1, f2); #else - uint64_t uu____0 = fadd_e(out, f1, f2); + KRML_HOST_IGNORE(fadd_e(out, f1, f2)); #endif } @@ -53,7 +53,7 @@ static inline void fsub0(uint64_t *out, uint64_t *f1, uint64_t *f2) #if HACL_CAN_COMPILE_INLINE_ASM fsub(out, f1, f2); #else - uint64_t uu____0 = fsub_e(out, f1, f2); + KRML_HOST_IGNORE(fsub_e(out, f1, f2)); #endif } @@ -62,7 +62,7 @@ static inline void fmul0(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *tm #if HACL_CAN_COMPILE_INLINE_ASM fmul(out, f1, f2, tmp); #else - uint64_t uu____0 = fmul_e(tmp, f1, out, f2); + KRML_HOST_IGNORE(fmul_e(tmp, f1, out, f2)); #endif } @@ -71,7 +71,7 @@ static inline void fmul20(uint64_t *out, uint64_t *f1, uint64_t *f2, uint64_t *t #if HACL_CAN_COMPILE_INLINE_ASM fmul2(out, f1, f2, tmp); 
#else - uint64_t uu____0 = fmul2_e(tmp, f1, out, f2); + KRML_HOST_IGNORE(fmul2_e(tmp, f1, out, f2)); #endif } @@ -80,7 +80,7 @@ static inline void fmul_scalar0(uint64_t *out, uint64_t *f1, uint64_t f2) #if HACL_CAN_COMPILE_INLINE_ASM fmul_scalar(out, f1, f2); #else - uint64_t uu____0 = fmul_scalar_e(out, f1, f2); + KRML_HOST_IGNORE(fmul_scalar_e(out, f1, f2)); #endif } @@ -89,7 +89,7 @@ static inline void fsqr0(uint64_t *out, uint64_t *f1, uint64_t *tmp) #if HACL_CAN_COMPILE_INLINE_ASM fsqr(out, f1, tmp); #else - uint64_t uu____0 = fsqr_e(tmp, f1, out); + KRML_HOST_IGNORE(fsqr_e(tmp, f1, out)); #endif } @@ -98,7 +98,7 @@ static inline void fsqr20(uint64_t *out, uint64_t *f, uint64_t *tmp) #if HACL_CAN_COMPILE_INLINE_ASM fsqr2(out, f, tmp); #else - uint64_t uu____0 = fsqr2_e(tmp, f, out); + KRML_HOST_IGNORE(fsqr2_e(tmp, f, out)); #endif } @@ -107,7 +107,7 @@ static inline void cswap20(uint64_t bit, uint64_t *p1, uint64_t *p2) #if HACL_CAN_COMPILE_INLINE_ASM cswap2(bit, p1, p2); #else - uint64_t uu____0 = cswap2_e(bit, p1, p2); + KRML_HOST_IGNORE(cswap2_e(bit, p1, p2)); #endif } diff --git a/src/msvc/Hacl_Ed25519.c b/src/msvc/Hacl_Ed25519.c index 9d7c3bd4..f9881e91 100644 --- a/src/msvc/Hacl_Ed25519.c +++ b/src/msvc/Hacl_Ed25519.c @@ -711,65 +711,53 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) FStar_UInt128_uint128 c00 = carry0; FStar_UInt128_uint128 carry1 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z11, c00), (uint32_t)56U); - uint64_t - t100 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z11, c00)) - & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c10 = carry1; FStar_UInt128_uint128 carry2 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z21, c10), (uint32_t)56U); - uint64_t - t101 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z21, c10)) - & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c20 = carry2; FStar_UInt128_uint128 carry3 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z31, c20), (uint32_t)56U); - uint64_t - t102 = - FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z31, c20)) - & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c30 = carry3; FStar_UInt128_uint128 carry4 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z41, c30), (uint32_t)56U); uint64_t - t103 = + t100 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z41, c30)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c40 = carry4; - uint64_t t410 = t103; + uint64_t t410 = t100; FStar_UInt128_uint128 carry5 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z5, c40), (uint32_t)56U); uint64_t - t104 = + t101 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z5, c40)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c5 = carry5; - uint64_t t51 = t104; + uint64_t t51 = t101; FStar_UInt128_uint128 carry6 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z6, c5), (uint32_t)56U); uint64_t - t105 = + t102 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z6, c5)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c6 = carry6; - uint64_t t61 = t105; + uint64_t t61 = t102; FStar_UInt128_uint128 carry7 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z7, c6), (uint32_t)56U); uint64_t - t106 = + t103 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z7, c6)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c7 = carry7; - uint64_t t71 = t106; + uint64_t t71 = t103; FStar_UInt128_uint128 carry8 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(z8, c7), (uint32_t)56U); uint64_t - t107 = + t104 = 
FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(z8, c7)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c8 = carry8; - uint64_t t81 = t107; + uint64_t t81 = t104; uint64_t t91 = FStar_UInt128_uint128_to_uint64(c8); uint64_t qmu4_ = t410; uint64_t qmu5_ = t51; @@ -818,19 +806,19 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) FStar_UInt128_uint128 xy31 = FStar_UInt128_mul_wide(qdiv3, m1); FStar_UInt128_uint128 xy40 = FStar_UInt128_mul_wide(qdiv4, m0); FStar_UInt128_uint128 carry9 = FStar_UInt128_shift_right(xy00, (uint32_t)56U); - uint64_t t108 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU; + uint64_t t105 = FStar_UInt128_uint128_to_uint64(xy00) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c0 = carry9; - uint64_t t010 = t108; + uint64_t t010 = t105; FStar_UInt128_uint128 carry10 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0), (uint32_t)56U); uint64_t - t109 = + t106 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy01, xy10), c0)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c11 = carry10; - uint64_t t110 = t109; + uint64_t t110 = t106; FStar_UInt128_uint128 carry11 = FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, @@ -839,14 +827,14 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) c11), (uint32_t)56U); uint64_t - t1010 = + t107 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy02, xy11), xy20), c11)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c21 = carry11; - uint64_t t210 = t1010; + uint64_t t210 = t107; FStar_UInt128_uint128 carry = FStar_UInt128_shift_right(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, @@ -856,7 +844,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) c21), (uint32_t)56U); uint64_t - t1011 = + t108 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy03, xy12), xy21), @@ -864,7 +852,7 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) c21)) & (uint64_t)0xffffffffffffffU; FStar_UInt128_uint128 c31 = carry; - uint64_t t310 = t1011; + uint64_t t310 = t108; uint64_t t411 = FStar_UInt128_uint128_to_uint64(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(FStar_UInt128_add_mod(xy04, @@ -880,24 +868,24 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) uint64_t qmul3 = t310; uint64_t qmul4 = t411; uint64_t b5 = (r0 - qmul0) >> (uint32_t)63U; - uint64_t t1012 = (b5 << (uint32_t)56U) + r0 - qmul0; + uint64_t t109 = (b5 << (uint32_t)56U) + r0 - qmul0; uint64_t c1 = b5; - uint64_t t011 = t1012; + uint64_t t011 = t109; uint64_t b6 = (r1 - (qmul1 + c1)) >> (uint32_t)63U; - uint64_t t1013 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1); + uint64_t t1010 = (b6 << (uint32_t)56U) + r1 - (qmul1 + c1); uint64_t c2 = b6; - uint64_t t111 = t1013; + uint64_t t111 = t1010; uint64_t b7 = (r2 - (qmul2 + c2)) >> (uint32_t)63U; - uint64_t t1014 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2); + uint64_t t1011 = (b7 << (uint32_t)56U) + r2 - (qmul2 + c2); uint64_t c3 = b7; - uint64_t t211 = t1014; + uint64_t t211 = t1011; uint64_t b8 = (r3 - (qmul3 + c3)) >> (uint32_t)63U; - uint64_t t1015 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3); + uint64_t t1012 = (b8 << (uint32_t)56U) + r3 - (qmul3 + c3); uint64_t c4 = b8; - uint64_t t311 = t1015; + 
uint64_t t311 = t1012; uint64_t b9 = (r4 - (qmul4 + c4)) >> (uint32_t)63U; - uint64_t t1016 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4); - uint64_t t412 = t1016; + uint64_t t1013 = (b9 << (uint32_t)40U) + r4 - (qmul4 + c4); + uint64_t t412 = t1013; uint64_t s0 = t011; uint64_t s1 = t111; uint64_t s2 = t211; @@ -914,21 +902,21 @@ static inline void barrett_reduction(uint64_t *z, uint64_t *t) uint64_t y3 = m31; uint64_t y4 = m41; uint64_t b10 = (s0 - y0) >> (uint32_t)63U; - uint64_t t1017 = (b10 << (uint32_t)56U) + s0 - y0; + uint64_t t1014 = (b10 << (uint32_t)56U) + s0 - y0; uint64_t b0 = b10; - uint64_t t01 = t1017; + uint64_t t01 = t1014; uint64_t b11 = (s1 - (y1 + b0)) >> (uint32_t)63U; - uint64_t t1018 = (b11 << (uint32_t)56U) + s1 - (y1 + b0); + uint64_t t1015 = (b11 << (uint32_t)56U) + s1 - (y1 + b0); uint64_t b1 = b11; - uint64_t t11 = t1018; + uint64_t t11 = t1015; uint64_t b12 = (s2 - (y2 + b1)) >> (uint32_t)63U; - uint64_t t1019 = (b12 << (uint32_t)56U) + s2 - (y2 + b1); + uint64_t t1016 = (b12 << (uint32_t)56U) + s2 - (y2 + b1); uint64_t b2 = b12; - uint64_t t21 = t1019; + uint64_t t21 = t1016; uint64_t b13 = (s3 - (y3 + b2)) >> (uint32_t)63U; - uint64_t t1020 = (b13 << (uint32_t)56U) + s3 - (y3 + b2); + uint64_t t1017 = (b13 << (uint32_t)56U) + s3 - (y3 + b2); uint64_t b3 = b13; - uint64_t t31 = t1020; + uint64_t t31 = t1017; uint64_t b = (s4 - (y4 + b3)) >> (uint32_t)63U; uint64_t t10 = (b << (uint32_t)56U) + s4 - (y4 + b3); uint64_t b4 = b; diff --git a/src/msvc/Hacl_FFDHE.c b/src/msvc/Hacl_FFDHE.c index 53b87f73..bc77dbdc 100644 --- a/src/msvc/Hacl_FFDHE.c +++ b/src/msvc/Hacl_FFDHE.c @@ -127,7 +127,6 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui memset(p_n1, 0U, nLen * sizeof (uint64_t)); uint64_t c0 = Lib_IntTypes_Intrinsics_sub_borrow_u64((uint64_t)0U, p_n[0U], (uint64_t)1U, p_n1); - uint64_t c1; if ((uint32_t)1U < nLen) { uint64_t *a1 = p_n + (uint32_t)1U; @@ -159,12 +158,12 @@ static inline uint64_t ffdhe_check_pk(Spec_FFDHE_ffdhe_alg a, uint64_t *pk_n, ui uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_sub_borrow_u64(c, t1, (uint64_t)0U, res_i); } - uint64_t c10 = c; - c1 = c10; + uint64_t c1 = c; + KRML_HOST_IGNORE(c1); } else { - c1 = c0; + KRML_HOST_IGNORE(c0); } KRML_CHECK_SIZE(sizeof (uint64_t), nLen); uint64_t *b2 = (uint64_t *)alloca(nLen * sizeof (uint64_t)); diff --git a/src/msvc/Hacl_Frodo_KEM.c b/src/msvc/Hacl_Frodo_KEM.c index 13db363a..4265ac0e 100644 --- a/src/msvc/Hacl_Frodo_KEM.c +++ b/src/msvc/Hacl_Frodo_KEM.c @@ -30,6 +30,6 @@ void randombytes_(uint32_t len, uint8_t *res) { - bool b = Lib_RandomBuffer_System_randombytes(res, len); + KRML_HOST_IGNORE(Lib_RandomBuffer_System_randombytes(res, len)); } diff --git a/src/msvc/Hacl_HMAC_DRBG.c b/src/msvc/Hacl_HMAC_DRBG.c index 93e47dc9..b3acf354 100644 --- a/src/msvc/Hacl_HMAC_DRBG.c +++ b/src/msvc/Hacl_HMAC_DRBG.c @@ -71,6 +71,8 @@ uint32_t Hacl_HMAC_DRBG_min_length(Spec_Hash_Definitions_hash_alg a) bool Hacl_HMAC_DRBG_uu___is_State(Spec_Hash_Definitions_hash_alg a, Hacl_HMAC_DRBG_state projectee) { + KRML_HOST_IGNORE(a); + KRML_HOST_IGNORE(projectee); return true; } @@ -1104,6 +1106,7 @@ Hacl_HMAC_DRBG_generate( void Hacl_HMAC_DRBG_free(Spec_Hash_Definitions_hash_alg uu___, Hacl_HMAC_DRBG_state s) { + KRML_HOST_IGNORE(uu___); uint8_t *k = s.k; uint8_t *v = s.v; uint32_t *ctr = s.reseed_counter; diff --git a/src/msvc/Hacl_Hash_Blake2.c b/src/msvc/Hacl_Hash_Blake2.c index 194e7157..aecc6165 100644 --- a/src/msvc/Hacl_Hash_Blake2.c +++ b/src/msvc/Hacl_Hash_Blake2.c 
@@ -545,6 +545,7 @@ Hacl_Blake2b_32_blake2b_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { FStar_UInt128_uint128 @@ -1192,6 +1193,7 @@ Hacl_Blake2s_32_blake2s_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U); diff --git a/src/msvc/Hacl_Hash_Blake2b_256.c b/src/msvc/Hacl_Hash_Blake2b_256.c index d0df7cd8..b37ffc5f 100644 --- a/src/msvc/Hacl_Hash_Blake2b_256.c +++ b/src/msvc/Hacl_Hash_Blake2b_256.c @@ -268,6 +268,7 @@ Hacl_Blake2b_256_blake2b_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { FStar_UInt128_uint128 diff --git a/src/msvc/Hacl_Hash_Blake2s_128.c b/src/msvc/Hacl_Hash_Blake2s_128.c index 5bf06711..86c4f030 100644 --- a/src/msvc/Hacl_Hash_Blake2s_128.c +++ b/src/msvc/Hacl_Hash_Blake2s_128.c @@ -268,6 +268,7 @@ Hacl_Blake2s_128_blake2s_update_multi( uint32_t nb ) { + KRML_HOST_IGNORE(len); for (uint32_t i = (uint32_t)0U; i < nb; i++) { uint64_t totlen = prev + (uint64_t)((i + (uint32_t)1U) * (uint32_t)64U); diff --git a/src/msvc/Hacl_Hash_MD5.c b/src/msvc/Hacl_Hash_MD5.c index 1b376960..222ac824 100644 --- a/src/msvc/Hacl_Hash_MD5.c +++ b/src/msvc/Hacl_Hash_MD5.c @@ -1218,7 +1218,6 @@ void Hacl_Streaming_MD5_legacy_init(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Hash_Core_MD5_legacy_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/msvc/Hacl_Hash_SHA1.c b/src/msvc/Hacl_Hash_SHA1.c index 80edc004..5ecb3c0b 100644 --- a/src/msvc/Hacl_Hash_SHA1.c +++ b/src/msvc/Hacl_Hash_SHA1.c @@ -254,7 +254,6 @@ void Hacl_Streaming_SHA1_legacy_init(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Hash_Core_SHA1_legacy_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/msvc/Hacl_Hash_SHA2.c b/src/msvc/Hacl_Hash_SHA2.c index 46fde83f..c93c3616 100644 --- a/src/msvc/Hacl_Hash_SHA2.c +++ b/src/msvc/Hacl_Hash_SHA2.c @@ -537,7 +537,6 @@ void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha256_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -836,7 +835,6 @@ void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_MD_state_32 *s) Hacl_Streaming_MD_state_32 scrut = *s; uint8_t *buf = scrut.buf; uint32_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha224_init(block_state); Hacl_Streaming_MD_state_32 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -962,7 +960,6 @@ void Hacl_Streaming_SHA2_init_512(Hacl_Streaming_MD_state_64 *s) Hacl_Streaming_MD_state_64 scrut = *s; uint8_t *buf = scrut.buf; uint64_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha512_init(block_state); Hacl_Streaming_MD_state_64 tmp = { .block_state = 
block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -1262,7 +1259,6 @@ void Hacl_Streaming_SHA2_init_384(Hacl_Streaming_MD_state_64 *s) Hacl_Streaming_MD_state_64 scrut = *s; uint8_t *buf = scrut.buf; uint64_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_SHA2_Scalar32_sha384_init(block_state); Hacl_Streaming_MD_state_64 tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/msvc/Hacl_Hash_SHA3.c b/src/msvc/Hacl_Hash_SHA3.c index 5f4707f4..19d13b1b 100644 --- a/src/msvc/Hacl_Hash_SHA3.c +++ b/src/msvc/Hacl_Hash_SHA3.c @@ -125,10 +125,9 @@ Hacl_Hash_SHA3_update_last_sha3( if (input_len == len) { Hacl_Impl_SHA3_absorb_inner(len, input, s); - uint8_t *uu____0 = input + input_len; uint8_t lastBlock_[200U] = { 0U }; uint8_t *lastBlock = lastBlock_; - memcpy(lastBlock, uu____0, (uint32_t)0U * sizeof (uint8_t)); + memcpy(lastBlock, input + input_len, (uint32_t)0U * sizeof (uint8_t)); lastBlock[0U] = suffix; Hacl_Impl_SHA3_loadState(len, lastBlock, s); if (!((suffix & (uint8_t)0x80U) == (uint8_t)0U) && (uint32_t)0U == len - (uint32_t)1U) @@ -167,8 +166,7 @@ hash_buf2; Spec_Hash_Definitions_hash_alg Hacl_Streaming_Keccak_get_alg(Hacl_Streaming_Keccak_state *s) { - Hacl_Streaming_Keccak_state scrut = *s; - Hacl_Streaming_Keccak_hash_buf block_state = scrut.block_state; + Hacl_Streaming_Keccak_hash_buf block_state = (*s).block_state; return block_state.fst; } @@ -809,6 +807,7 @@ Hacl_Impl_SHA3_keccak( uint8_t *output ) { + KRML_HOST_IGNORE(capacity); uint32_t rateInBytes = rate / (uint32_t)8U; uint64_t s[25U] = { 0U }; absorb(s, rateInBytes, inputByteLen, input, delimitedSuffix); diff --git a/src/msvc/Hacl_K256_ECDSA.c b/src/msvc/Hacl_K256_ECDSA.c index 19395653..c5dda43f 100644 --- a/src/msvc/Hacl_K256_ECDSA.c +++ b/src/msvc/Hacl_K256_ECDSA.c @@ -498,7 +498,7 @@ mul_pow2_256_minus_q_add( uint64_t r = c; tmp[len + i0] = r;); memcpy(res + (uint32_t)2U, a, len * sizeof (uint64_t)); - uint64_t uu____0 = bn_add(resLen, res, len + (uint32_t)2U, tmp, res); + KRML_HOST_IGNORE(bn_add(resLen, res, len + (uint32_t)2U, tmp, res)); uint64_t c = bn_add(resLen, res, (uint32_t)4U, e, res); return c; } @@ -514,15 +514,23 @@ static inline void modq(uint64_t *out, uint64_t *a) uint64_t *t01 = tmp; uint64_t m[7U] = { 0U }; uint64_t p[5U] = { 0U }; - uint64_t - c0 = mul_pow2_256_minus_q_add((uint32_t)4U, (uint32_t)7U, t01, a + (uint32_t)4U, a, m); - uint64_t - c10 = mul_pow2_256_minus_q_add((uint32_t)3U, (uint32_t)5U, t01, m + (uint32_t)4U, m, p); + KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)4U, + (uint32_t)7U, + t01, + a + (uint32_t)4U, + a, + m)); + KRML_HOST_IGNORE(mul_pow2_256_minus_q_add((uint32_t)3U, + (uint32_t)5U, + t01, + m + (uint32_t)4U, + m, + p)); uint64_t c2 = mul_pow2_256_minus_q_add((uint32_t)1U, (uint32_t)4U, t01, p + (uint32_t)4U, p, r); - uint64_t c00 = c2; + uint64_t c0 = c2; uint64_t c1 = add4(r, tmp, out); - uint64_t mask = (uint64_t)0U - (c00 + c1); + uint64_t mask = (uint64_t)0U - (c0 + c1); KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, @@ -612,7 +620,7 @@ static inline void qmul_shift_384(uint64_t *res, uint64_t *a, uint64_t *b) uint64_t *res_i = res1 + i; c = Lib_IntTypes_Intrinsics_add_carry_u64(c, t1, (uint64_t)0U, res_i);); uint64_t c1 = c; - uint64_t uu____0 = c1; + KRML_HOST_IGNORE(c1); uint64_t flag = l[5U] >> (uint32_t)63U; uint64_t mask = (uint64_t)0U - flag; KRML_MAYBE_FOR4(i, @@ -1223,6 +1231,7 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar) 
(uint64_t)118285133003718U, (uint64_t)434519962075150U, (uint64_t)1114612377498854U, (uint64_t)3488596944003813U, (uint64_t)450716531072892U, (uint64_t)66044973203836U }; + KRML_HOST_IGNORE(q2); uint64_t q3[15U] = { @@ -1232,6 +1241,7 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar) (uint64_t)265969268774814U, (uint64_t)1913228635640715U, (uint64_t)2831959046949342U, (uint64_t)888030405442963U, (uint64_t)1817092932985033U, (uint64_t)101515844997121U }; + KRML_HOST_IGNORE(q3); uint64_t q4[15U] = { @@ -1241,6 +1251,7 @@ static inline void point_mul_g(uint64_t *out, uint64_t *scalar) (uint64_t)12245672982162U, (uint64_t)2119364213800870U, (uint64_t)2034960311715107U, (uint64_t)3172697815804487U, (uint64_t)4185144850224160U, (uint64_t)2792055915674U }; + KRML_HOST_IGNORE(q4); uint64_t *r1 = scalar; uint64_t *r2 = scalar + (uint32_t)1U; uint64_t *r3 = scalar + (uint32_t)2U; @@ -1605,6 +1616,7 @@ Hacl_K256_ECDSA_ecdsa_sign_hashed_msg( ) { uint64_t oneq[4U] = { (uint64_t)0x1U, (uint64_t)0x0U, (uint64_t)0x0U, (uint64_t)0x0U }; + KRML_HOST_IGNORE(oneq); uint64_t rsdk_q[16U] = { 0U }; uint64_t *r_q = rsdk_q; uint64_t *s_q = rsdk_q + (uint32_t)4U; diff --git a/src/msvc/Hacl_RSAPSS.c b/src/msvc/Hacl_RSAPSS.c index ce2fb517..084f10b3 100644 --- a/src/msvc/Hacl_RSAPSS.c +++ b/src/msvc/Hacl_RSAPSS.c @@ -404,9 +404,9 @@ load_skey( Sign a message `msg` and write the signature to `sgnt`. @param a Hash algorithm to use. Allowed values for `a` are ... - * Spec_Hash_Definitions_SHA2_256, - * Spec_Hash_Definitions_SHA2_384, and - * Spec_Hash_Definitions_SHA2_512. + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. @@ -518,7 +518,10 @@ Hacl_RSAPSS_rsapss_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param pkey Pointer to public key created by `Hacl_RSAPSS_new_rsapss_load_pkey`. @@ -637,10 +640,10 @@ Load a public key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. -@return Returns an allocated public key. Note: caller must take care to `free()` the created key. +@return Returns an allocated public key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. */ uint64_t *Hacl_RSAPSS_new_rsapss_load_pkey(uint32_t modBits, uint32_t eBits, uint8_t *nb, uint8_t *eb) @@ -707,11 +710,11 @@ Load a secret key from key parts. @param modBits Count of bits in modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. 
-@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. -@return Returns an allocated secret key. Note: caller must take care to `free()` the created key. +@return Returns an allocated secret key upon success, otherwise, `NULL` if key part arguments are invalid or memory allocation fails. Note: caller must take care to `free()` the created key. */ uint64_t *Hacl_RSAPSS_new_rsapss_load_skey( @@ -804,13 +807,16 @@ uint64_t /** Sign a message `msg` and write the signature to `sgnt`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. @param dBits Count of bits in `d` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. -@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. +@param db Pointer to `ceil(modBits / 8)` bytes where the `d` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param salt Pointer to `saltLen` bytes where the salt is read from. @param msgLen Length of message. @@ -875,11 +881,14 @@ Hacl_RSAPSS_rsapss_skey_sign( /** Verify the signature `sgnt` of a message `msg`. -@param a Hash algorithm to use. +@param a Hash algorithm to use. Allowed values for `a` are ... + - Spec_Hash_Definitions_SHA2_256, + - Spec_Hash_Definitions_SHA2_384, and + - Spec_Hash_Definitions_SHA2_512. @param modBits Count of bits in the modulus (`n`). @param eBits Count of bits in `e` value. -@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`) is read from. -@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value is read from. +@param nb Pointer to `ceil(modBits / 8)` bytes where the modulus (`n`), in big-endian byte order, is read from. +@param eb Pointer to `ceil(modBits / 8)` bytes where the `e` value, in big-endian byte order, is read from. @param saltLen Length of salt. @param sgntLen Length of signature. @param sgnt Pointer to `sgntLen` bytes where the signature is read from. 
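For reviewers who want to see how the RSAPSS entry points documented above fit together, a minimal usage sketch follows; it is not part of the patch. It assumes the declarations from Hacl_RSAPSS.h with the parameter order shown in the doc comments above, that the Spec_Hash_Definitions_* constants come from Hacl_Spec.h, that Hacl_RSAPSS_rsapss_verify returns true on success, and a 2048-bit key with public exponent 65537; all buffer names and sizes are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#include "Hacl_RSAPSS.h"   /* declares the functions documented above */
#include "Hacl_Spec.h"     /* assumed home of Spec_Hash_Definitions_SHA2_256 */

#define MOD_BITS 2048U     /* modulus size in bits; ceil(2048 / 8) = 256 bytes */
#define E_BITS   17U       /* public exponent 65537 needs 17 bits */

/* Verify a PSS signature over `msg` using SHA2-256 and a 32-byte salt.
   `n_bytes` and `e_bytes` hold the key parts in big-endian byte order. */
static bool verify_sig(uint8_t *n_bytes, uint8_t *e_bytes,
                       uint8_t *msg, uint32_t msg_len, uint8_t *sgnt)
{
  /* Load the public key; NULL means the key parts were rejected or the
     allocation failed. The caller must free() the returned key. */
  uint64_t *pkey = Hacl_RSAPSS_new_rsapss_load_pkey(MOD_BITS, E_BITS, n_bytes, e_bytes);
  if (pkey == NULL)
    return false;

  bool ok = Hacl_RSAPSS_rsapss_verify(Spec_Hash_Definitions_SHA2_256,
                                      MOD_BITS, E_BITS, pkey,
                                      32U,                 /* saltLen */
                                      MOD_BITS / 8U, sgnt, /* sgntLen, sgnt */
                                      msg_len, msg);       /* msgLen, msg */
  free(pkey);
  return ok;
}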
diff --git a/src/msvc/Hacl_Salsa20.c b/src/msvc/Hacl_Salsa20.c index e157d5ef..2758f8a4 100644 --- a/src/msvc/Hacl_Salsa20.c +++ b/src/msvc/Hacl_Salsa20.c @@ -181,6 +181,7 @@ salsa20_encrypt( memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t)); ctx[15U] = (uint32_t)0x6b206574U; uint32_t k[16U] = { 0U }; + KRML_HOST_IGNORE(k); uint32_t rem = len % (uint32_t)64U; uint32_t nb = len / (uint32_t)64U; uint32_t rem1 = len % (uint32_t)64U; @@ -217,9 +218,8 @@ salsa20_encrypt( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = text + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, text + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k1[16U] = { 0U }; salsa20_core(k1, ctx, nb); uint32_t bl[16U] = { 0U }; @@ -294,6 +294,7 @@ salsa20_decrypt( memcpy(ctx + (uint32_t)11U, k10, (uint32_t)4U * sizeof (uint32_t)); ctx[15U] = (uint32_t)0x6b206574U; uint32_t k[16U] = { 0U }; + KRML_HOST_IGNORE(k); uint32_t rem = len % (uint32_t)64U; uint32_t nb = len / (uint32_t)64U; uint32_t rem1 = len % (uint32_t)64U; @@ -330,9 +331,8 @@ salsa20_decrypt( if (rem1 > (uint32_t)0U) { uint8_t *uu____2 = out + nb * (uint32_t)64U; - uint8_t *uu____3 = cipher + nb * (uint32_t)64U; uint8_t plain[64U] = { 0U }; - memcpy(plain, uu____3, rem * sizeof (uint8_t)); + memcpy(plain, cipher + nb * (uint32_t)64U, rem * sizeof (uint8_t)); uint32_t k1[16U] = { 0U }; salsa20_core(k1, ctx, nb); uint32_t bl[16U] = { 0U }; diff --git a/src/msvc/Hacl_Streaming_Blake2.c b/src/msvc/Hacl_Streaming_Blake2.c index 4faa859e..948d56c2 100644 --- a/src/msvc/Hacl_Streaming_Blake2.c +++ b/src/msvc/Hacl_Streaming_Blake2.c @@ -54,7 +54,6 @@ void Hacl_Streaming_Blake2_blake2s_32_no_key_init(Hacl_Streaming_Blake2_blake2s_ Hacl_Streaming_Blake2_blake2s_32_state scrut = *s1; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2_blake2s_32_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2s_32_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); Hacl_Streaming_Blake2_blake2s_32_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; @@ -354,7 +353,6 @@ void Hacl_Streaming_Blake2_blake2b_32_no_key_init(Hacl_Streaming_Blake2_blake2b_ Hacl_Streaming_Blake2_blake2b_32_state scrut = *s1; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2_blake2b_32_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2b_32_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); Hacl_Streaming_Blake2_blake2b_32_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/msvc/Hacl_Streaming_Blake2b_256.c b/src/msvc/Hacl_Streaming_Blake2b_256.c index d2df234a..bdb5433f 100644 --- a/src/msvc/Hacl_Streaming_Blake2b_256.c +++ b/src/msvc/Hacl_Streaming_Blake2b_256.c @@ -66,7 +66,6 @@ Hacl_Streaming_Blake2b_256_blake2b_256_no_key_init( Hacl_Streaming_Blake2b_256_blake2b_256_state scrut = *s; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2b_256_blake2b_256_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2b_256_blake2b_init(block_state.snd, (uint32_t)0U, (uint32_t)64U); Hacl_Streaming_Blake2b_256_blake2b_256_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/msvc/Hacl_Streaming_Blake2s_128.c b/src/msvc/Hacl_Streaming_Blake2s_128.c index eaace7ce..f97bf5d0 100644 --- 
a/src/msvc/Hacl_Streaming_Blake2s_128.c +++ b/src/msvc/Hacl_Streaming_Blake2s_128.c @@ -66,7 +66,6 @@ Hacl_Streaming_Blake2s_128_blake2s_128_no_key_init( Hacl_Streaming_Blake2s_128_blake2s_128_state scrut = *s; uint8_t *buf = scrut.buf; Hacl_Streaming_Blake2s_128_blake2s_128_block_state block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Blake2s_128_blake2s_init(block_state.snd, (uint32_t)0U, (uint32_t)32U); Hacl_Streaming_Blake2s_128_blake2s_128_state tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; diff --git a/src/msvc/Hacl_Streaming_Poly1305_128.c b/src/msvc/Hacl_Streaming_Poly1305_128.c index c752cfb0..c3f7c19a 100644 --- a/src/msvc/Hacl_Streaming_Poly1305_128.c +++ b/src/msvc/Hacl_Streaming_Poly1305_128.c @@ -58,7 +58,6 @@ Hacl_Streaming_Poly1305_128_init(uint8_t *k, Hacl_Streaming_Poly1305_128_poly130 uint8_t *k_ = scrut.p_key; uint8_t *buf = scrut.buf; Lib_IntVector_Intrinsics_vec128 *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Poly1305_128_poly1305_init(block_state, k); memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); uint8_t *k_1 = k_; @@ -312,7 +311,7 @@ Hacl_Streaming_Poly1305_128_finish( { ite1 = r % (uint32_t)16U; } - uint64_t prev_len_last = total_len - (uint64_t)ite1; + KRML_HOST_IGNORE(total_len - (uint64_t)ite1); uint32_t ite2; if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) { diff --git a/src/msvc/Hacl_Streaming_Poly1305_256.c b/src/msvc/Hacl_Streaming_Poly1305_256.c index c1915ed9..e56275a4 100644 --- a/src/msvc/Hacl_Streaming_Poly1305_256.c +++ b/src/msvc/Hacl_Streaming_Poly1305_256.c @@ -58,7 +58,6 @@ Hacl_Streaming_Poly1305_256_init(uint8_t *k, Hacl_Streaming_Poly1305_256_poly130 uint8_t *k_ = scrut.p_key; uint8_t *buf = scrut.buf; Lib_IntVector_Intrinsics_vec256 *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Poly1305_256_poly1305_init(block_state, k); memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); uint8_t *k_1 = k_; @@ -312,7 +311,7 @@ Hacl_Streaming_Poly1305_256_finish( { ite1 = r % (uint32_t)16U; } - uint64_t prev_len_last = total_len - (uint64_t)ite1; + KRML_HOST_IGNORE(total_len - (uint64_t)ite1); uint32_t ite2; if (r % (uint32_t)16U == (uint32_t)0U && r > (uint32_t)0U) { diff --git a/src/msvc/Hacl_Streaming_Poly1305_32.c b/src/msvc/Hacl_Streaming_Poly1305_32.c index 89852727..249a622f 100644 --- a/src/msvc/Hacl_Streaming_Poly1305_32.c +++ b/src/msvc/Hacl_Streaming_Poly1305_32.c @@ -53,7 +53,6 @@ Hacl_Streaming_Poly1305_32_init(uint8_t *k, Hacl_Streaming_Poly1305_32_poly1305_ uint8_t *k_ = scrut.p_key; uint8_t *buf = scrut.buf; uint64_t *block_state = scrut.block_state; - KRML_HOST_IGNORE((void *)(uint8_t)0U); Hacl_Poly1305_32_poly1305_init(block_state, k); memcpy(k_, k, (uint32_t)32U * sizeof (uint8_t)); uint8_t *k_1 = k_; diff --git a/src/wasm/EverCrypt_Hash.wasm b/src/wasm/EverCrypt_Hash.wasm index 6b1a6c3fcd7de9879f50ecfa716db8f695b4acdf..8fdc7b27e028cff8b53492b41b29a176bc2bb191 100644 GIT binary patch delta 1974 zcmZ9NOHU+K6vwOjRR!HM-89{FSHsZf&_F+Epy&sW=6L~uh8dtW1I$dKnWZ?5@Otri3`7jaWWe>ZX|K#!pAwcit56o7xh1V9{=CDw^A=Ig1=q_ zpKZkTKQA}q@0!uS&(F`bVBo27rpuo)n|kiDH)|z(uRf=6An^3$O#bnvCATV5{U-K; z4m~3DMCklGg@|J-NduwDNOWw$T zhKevly)wRDSq_usEL--H*uf%X(V|7=s3PYhut;?Y29)X& zn>s|wB^dM`Wo}9le70bj!{WJydC1F7v)BLthak@cdFqw-^#W2%04{-(rkSc~@dssnaqYvo=Q?7-51DhXAoa1f#;nG3qhqn>tF$B`A20^1-xo2ER{P<~So1 zp{Ni_P;wB8j8LRzMPIWLDMl!{2u_lNP{J}HRH&ljLnyQI5XP12B2*Zm!u2aq#tA~G zvYAP0GU*#zCFL?qc#p0H{s$+i#fmuvQ{od{49RmlCEu{E;51Cj_w8EH23tO}YdteC 
[Remaining GIT binary patch data omitted: the base85-encoded deltas for the updated WebAssembly modules (EverCrypt_Hash.wasm, Hacl_Bignum.wasm, Hacl_Bignum256.wasm, Hacl_Bignum256_32.wasm, Hacl_Bignum32.wasm, Hacl_Bignum4096.wasm, Hacl_Bignum4096_32.wasm, Hacl_Bignum64.wasm, Hacl_Chacha20Poly1305_32.wasm, Hacl_Chacha20_Vec32.wasm, Hacl_Curve25519_51.wasm, Hacl_GenericField32.wasm, Hacl_GenericField64.wasm, Hacl_HMAC.wasm, Hacl_HPKE_Curve51_CP32_SHA256.wasm, Hacl_HPKE_Curve51_CP32_SHA512.wasm, Hacl_Hash_MD5.wasm, Hacl_Hash_SHA1.wasm, Hacl_Hash_SHA3.wasm, Hacl_K256_ECDSA.wasm, Hacl_NaCl.wasm, Hacl_P256.wasm, Hacl_Salsa20.wasm) are not human-readable; the old and new sizes of these files are listed in the diffstat at the top of the patch.]
zcmZWpO>Yxd6n)R0*v4bWj_m}AkmOttX;K=Bjj?NoB%UD<5(iRK67oSHfwlw_^$$=y zsEPX#RCO^D!>U!)MOKv}nUW1sS5>>J+C{~p5~kYy{i>V0E3G3w&Y%zbk|&OPtk z_vuEoFKO^X%Lt+Wu+AAgo=Tg&`*hvjsl|N;D1?yx)05ugVT28l7Xl$e z-ldSYz#pQh6%5hGn_;!RT1I$4h`d7)*_8=XE2odJW5m$L|8o><1yy;GArYVBGHsn=bVX` zaynK_b~#^+%2@5IZ~l_;7oTvSAy4Ii<*9fqS;<-c+SazIb(a)dmoWiejAkt>1h?=O ziiRvx(Ke2COk8pQ&@`rL1~YUOSD8qf#VpNX&JgD{%MdM)9Bs-qn#a5;OSAxq1ti9z zhv=FibzP@LEY?<^eMga<4;V{}r{ZNCJ}X!&ypS>&Q#j(#5x2*7`XdP>s2%Op0iS81 zqh{^=Xmdsdh&i2po_Cw5BNpXf`^ceHTU(r)E%X#LU-4pVtrMR-{aSNDdq695+g~cZE%&=7# z3SDQ(?HM<49XG19#Oy3pP5C}8V+qUabQ3p?k*`0dTezjjcpKNiYoCmFI0c{VBA4>n zzr@=H=w0_W-NtQN!3uMt(;eK=sf;oL%8mT+!LO#}d*?)-Jm_z?f_!lL_GF*@Oj84s zM3P0@`Dj4u=@e4+p-C#zlH5+RiLew~O3o=IXC9)ulfC;qQAp7#u9oJ2bW{I=t^MH% zvCz{m37y$v5AG?CSX~%6tHXqSl!75mZdis zSdIg7O~6jGwGS-Jz;gcsmQ%oT_U@4mZytHacun@E2Qlah3wZ@2&sp@F@sB(aXvpO`#bq=Xj}}ha+&ce RKjx}y?xnE(`$>5`@h=MxLyQ0b diff --git a/src/wasm/Hacl_Salsa20.wasm b/src/wasm/Hacl_Salsa20.wasm index 0df1cd7668501856c53e7f3dc0ecc229350021dc..c712bb36a95d3f9441da82d29d4899cfec4ae3b3 100644 GIT binary patch delta 1169 zcmeH_Jx{`55XYZZ&=@soLLo+t-Ysfkl*AAQCY}UZprxg}l@}RYfw;IEzJOCFH%2EV z;S2a}j0qpYJHT33XSVy_%kS=a$QWQqkrM)nQW;@!D^G^7VtFYp$!V~AJOtxfM3RDOQm-0lf(mJ3jVXn*ummQS z;RY*2sYEDcQp#uvDuw2n$PugrQ(cd1Xn_@IiSsKPY$>wT-{Po6I7|KxWs{20P&Q4N z4W7?V5@2H$s&D{lZSP2G=zznVj0^6H<1oh|juYXyq?qF}hnlUDcsbj}niS|!fhV={ z&ui4M4kmeG;l>SUP%$>4i7jYh8|usO5ZVm%q%uU0c1z`bLivAC9jK9q`tv(iL-P^4 z&{-+yGQ?+y&yd?5L3$C0Nh+L$aI6@|Wj!wI(K4xA9}0L!XBklWAhQ1*sr6a#8f&Tr0<_j#uzX8*D@;d+k delta 1173 zcmeH_IZne+5Qd*kh=f>VK?o6|m}yuFRs{+~ktY$`NxWrWoJ68^($S;b!8JV8dMfGSA#)@HVQVHsA(@}nl zsC#PLMh6_&gmZ0upLi~~GoA X@G^6;93VlHvK*j)576WbPgcJH4@C5$ diff --git a/src/wasm/INFO.txt b/src/wasm/INFO.txt index 7ed8e74a..60cb7b00 100644 --- a/src/wasm/INFO.txt +++ b/src/wasm/INFO.txt @@ -1,4 +1,4 @@ This code was generated with the following toolchain. -F* version: 155853a14336aa0713dba7db5408f4c8ab512a06 -KaRaMeL version: db63c1de17565be0ec4989f58532717a04e3ff40 +F* version: bc622701c668f6b4092760879372968265d4a4e1 +Karamel version: 7cffd27cfefbd220e986e561e8d350f043609f76 Vale version: 0.3.19