From 92a0be5e620cee042060f71b30943fa5ee0c6852 Mon Sep 17 00:00:00 2001 From: Ezra Shaw Date: Sat, 22 Apr 2023 17:07:46 +1200 Subject: [PATCH 1/4] move to array simd --- crates/core_arch/src/macros.rs | 4 +- crates/core_arch/src/simd.rs | 1091 ++------------------------- crates/core_arch/src/x86/avx.rs | 18 +- crates/core_arch/src/x86/avx512f.rs | 24 +- crates/core_arch/src/x86/mod.rs | 35 +- crates/core_arch/src/x86/sse.rs | 16 +- crates/core_arch/src/x86/sse2.rs | 16 +- 7 files changed, 118 insertions(+), 1086 deletions(-) diff --git a/crates/core_arch/src/macros.rs b/crates/core_arch/src/macros.rs index 0c86a24ca0..a75cb4ec2b 100644 --- a/crates/core_arch/src/macros.rs +++ b/crates/core_arch/src/macros.rs @@ -52,14 +52,14 @@ macro_rules! static_assert_simm_bits { macro_rules! types { ($( $(#[$doc:meta])* - pub struct $name:ident($($fields:tt)*); + pub struct $name:ident($field:tt); )*) => ($( $(#[$doc])* #[derive(Copy, Clone, Debug)] #[allow(non_camel_case_types)] #[repr(simd)] #[allow(clippy::missing_inline_in_public_items)] - pub struct $name($($fields)*); + pub struct $name($field); )*) } diff --git a/crates/core_arch/src/simd.rs b/crates/core_arch/src/simd.rs index 281fefba42..75a674333a 100644 --- a/crates/core_arch/src/simd.rs +++ b/crates/core_arch/src/simd.rs @@ -3,25 +3,22 @@ #![allow(non_camel_case_types)] macro_rules! simd_ty { - ($id:ident [$ety:ident]: $($elem_ty:ident),* | $($elem_name:ident),*) => { + ($id:ident: [$ety:ident; $ecount:literal]) => { #[repr(simd)] #[derive(Copy, Clone, Debug, PartialEq)] - pub(crate) struct $id($(pub $elem_ty),*); + pub(crate) struct $id(pub [$ety; $ecount]); #[allow(clippy::use_self)] impl $id { #[inline(always)] - pub(crate) const fn new($($elem_name: $elem_ty),*) -> Self { - $id($($elem_name),*) + pub(crate) const fn new(val: [$ety; $ecount]) -> Self { + $id(val) } + // FIXME: Workaround rust@60637 #[inline(always)] pub(crate) const fn splat(value: $ety) -> Self { - $id($({ - #[allow(non_camel_case_types, dead_code)] - struct $elem_name; - value - }),*) + $id([value; $ecount]) } // FIXME: Workaround rust@60637 @@ -36,10 +33,10 @@ macro_rules! simd_ty { } macro_rules! simd_m_ty { - ($id:ident [$ety:ident]: $($elem_ty:ident),* | $($elem_name:ident),*) => { + ($id:ident: [$ety:ident; $ecount:literal]) => { #[repr(simd)] #[derive(Copy, Clone, Debug, PartialEq)] - pub(crate) struct $id($(pub $elem_ty),*); + pub(crate) struct $id([$ety; $ecount]); #[allow(clippy::use_self)] impl $id { @@ -49,27 +46,22 @@ macro_rules! simd_m_ty { } #[inline(always)] - pub(crate) const fn new($($elem_name: bool),*) -> Self { - $id($(Self::bool_to_internal($elem_name)),*) + pub(crate) const fn new(val: [$ety; $ecount]) -> Self { + $id(val) } // FIXME: Workaround rust@60637 #[inline(always)] - pub(crate) const fn splat(value: bool) -> Self { - $id($({ - #[allow(non_camel_case_types, dead_code)] - struct $elem_name; - Self::bool_to_internal(value) - }),*) + pub(crate) const fn splat(value: $ety) -> Self { + $id([value; $ecount]) } // FIXME: Workaround rust@60637 #[inline(always)] - pub(crate) fn extract(self, index: usize) -> bool { - let r: $ety = unsafe { + pub(crate) fn extract(self, index: usize) -> $ety { + unsafe { crate::core_arch::simd_llvm::simd_extract(self, index as u32) - }; - r != 0 + } } } } @@ -77,1029 +69,80 @@ macro_rules! 
simd_m_ty { // 16-bit wide types: -simd_ty!(u8x2[u8]: u8, u8 | x0, x1); -simd_ty!(i8x2[i8]: i8, i8 | x0, x1); +simd_ty!(u8x2: [u8; 2]); + +simd_ty!(i8x2: [i8; 2]); // 32-bit wide types: -simd_ty!(u8x4[u8]: u8, u8, u8, u8 | x0, x1, x2, x3); -simd_ty!(u16x2[u16]: u16, u16 | x0, x1); +simd_ty!(u8x4: [u8; 4]); +simd_ty!(u16x2: [u16; 2]); + +simd_ty!(i8x4: [i8; 4]); +simd_ty!(i16x2: [i16; 2]); -simd_ty!(i8x4[i8]: i8, i8, i8, i8 | x0, x1, x2, x3); -simd_ty!(i16x2[i16]: i16, i16 | x0, x1); // 64-bit wide types: -simd_ty!( - u8x8[u8]: u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(u16x4[u16]: u16, u16, u16, u16 | x0, x1, x2, x3); -simd_ty!(u32x2[u32]: u32, u32 | x0, x1); -simd_ty!(u64x1[u64]: u64 | x1); +simd_ty!(u8x8: [u8; 8]); +simd_ty!(u16x4: [u16; 4]); +simd_ty!(u32x2: [u32; 2]); +simd_ty!(u64x1: [u64; 1]); -simd_ty!( - i8x8[i8]: i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(i16x4[i16]: i16, i16, i16, i16 | x0, x1, x2, x3); -simd_ty!(i32x2[i32]: i32, i32 | x0, x1); -simd_ty!(i64x1[i64]: i64 | x1); +simd_ty!(i8x8: [i8; 8]); +simd_ty!(i16x4: [i16; 4]); +simd_ty!(i32x2: [i32; 2]); +simd_ty!(i64x1: [i64; 1]); -simd_ty!(f32x2[f32]: f32, f32 | x0, x1); -simd_ty!(f64x1[f64]: f64 | x1); +simd_ty!(f32x2: [f32; 2]); +simd_ty!(f64x1: [f64; 1]); // 128-bit wide types: -simd_ty!( - u8x16[u8]: u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - u16x8[u16]: u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(u32x4[u32]: u32, u32, u32, u32 | x0, x1, x2, x3); -simd_ty!(u64x2[u64]: u64, u64 | x0, x1); +simd_ty!(u8x16: [u8; 16]); +simd_ty!(u16x8: [u16; 8]); +simd_ty!(u32x4: [u32; 4]); +simd_ty!(u64x2: [u64; 2]); -simd_ty!( - i8x16[i8]: i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - i16x8[i16]: i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(i32x4[i32]: i32, i32, i32, i32 | x0, x1, x2, x3); -simd_ty!(i64x2[i64]: i64, i64 | x0, x1); +simd_ty!(i8x16: [i8; 16]); +simd_ty!(i16x8: [i16; 8]); +simd_ty!(i32x4: [i32; 4]); +simd_ty!(i64x2: [i64; 2]); -simd_ty!(f32x4[f32]: f32, f32, f32, f32 | x0, x1, x2, x3); -simd_ty!(f64x2[f64]: f64, f64 | x0, x1); -simd_ty!(f64x4[f64]: f64, f64, f64, f64 | x0, x1, x2, x3); +simd_ty!(f32x4: [f32; 4]); +simd_ty!(f64x2: [f64; 2]); +simd_ty!(f64x4: [f64; 4]); -simd_m_ty!( - m8x16[i8]: i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_m_ty!( - m16x8[i16]: i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_m_ty!(m32x4[i32]: i32, i32, i32, i32 | x0, x1, x2, x3); -simd_m_ty!(m64x2[i64]: i64, i64 | x0, x1); +simd_m_ty!(m8x16: [i8; 16]); +simd_m_ty!(m16x8: [i16; 8]); +simd_m_ty!(m32x4: [i32; 4]); +simd_m_ty!(m64x2: [i64; 2]); // 256-bit wide types: -simd_ty!( - u8x32[u8]: u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - 
u8, - u8, - u8, - u8, - u8, - u8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); -simd_ty!( - u16x16[u16]: u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - u32x8[u32]: u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(u64x4[u64]: u64, u64, u64, u64 | x0, x1, x2, x3); +simd_ty!(u8x32: [u8; 32]); +simd_ty!(u16x16: [u16; 16]); +simd_ty!(u32x8: [u32; 8]); +simd_ty!(u64x4: [u64; 4]); -simd_ty!( - i8x32[i8]: i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); -simd_ty!( - i16x16[i16]: i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); -simd_ty!( - i32x8[i32]: i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); -simd_ty!(i64x4[i64]: i64, i64, i64, i64 | x0, x1, x2, x3); +simd_ty!(i8x32: [i8; 32]); +simd_ty!(i16x16: [i16; 16]); +simd_ty!(i32x8: [i32; 8]); +simd_ty!(i64x4: [i64; 4]); -simd_ty!( - f32x8[f32]: f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); +simd_ty!(f32x8: [f32; 8]); // 512-bit wide types: -simd_ty!( - i8x64[i8]: i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8, - i8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31, - x32, - x33, - x34, - x35, - x36, - x37, - x38, - x39, - x40, - x41, - x42, - x43, - x44, - x45, - x46, - x47, - x48, - x49, - x50, - x51, - x52, - x53, - x54, - x55, - x56, - x57, - x58, - x59, - x60, - x61, - x62, - x63 -); - -simd_ty!( - u8x64[u8]: u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8, - u8 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31, - x32, - x33, - x34, - x35, - x36, - x37, - x38, - x39, - x40, - x41, - x42, - x43, - x44, - x45, - x46, - x47, - x48, - x49, - x50, - x51, - 
x52, - x53, - x54, - x55, - x56, - x57, - x58, - x59, - x60, - x61, - x62, - x63 -); - -simd_ty!( - i16x32[i16]: i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16, - i16 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); - -simd_ty!( - u16x32[u16]: u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16, - u16 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15, - x16, - x17, - x18, - x19, - x20, - x21, - x22, - x23, - x24, - x25, - x26, - x27, - x28, - x29, - x30, - x31 -); - -simd_ty!( - i32x16[i32]: i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32, - i32 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); - -simd_ty!( - u32x16[u32]: u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32, - u32 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); - -simd_ty!( - f32x16[f32]: f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32, - f32 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7, - x8, - x9, - x10, - x11, - x12, - x13, - x14, - x15 -); - -simd_ty!( - i64x8[i64]: i64, - i64, - i64, - i64, - i64, - i64, - i64, - i64 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); +simd_ty!(u8x64: [u8; 64]); +simd_ty!(u16x32: [u16; 32]); +simd_ty!(u32x16: [u32; 16]); +simd_ty!(u64x8: [u64; 8]); -simd_ty!( - u64x8[u64]: u64, - u64, - u64, - u64, - u64, - u64, - u64, - u64 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); +simd_ty!(i8x64: [i8; 64]); +simd_ty!(i16x32: [i16; 32]); +simd_ty!(i32x16: [i32; 16]); +simd_ty!(i64x8: [i64; 8]); -simd_ty!( - f64x8[f64]: f64, - f64, - f64, - f64, - f64, - f64, - f64, - f64 | x0, - x1, - x2, - x3, - x4, - x5, - x6, - x7 -); +simd_ty!(f32x16: [f32; 16]); +simd_ty!(f64x8: [f64; 8]); diff --git a/crates/core_arch/src/x86/avx.rs b/crates/core_arch/src/x86/avx.rs index fafee5c0bd..afd4fa825c 100644 --- a/crates/core_arch/src/x86/avx.rs +++ b/crates/core_arch/src/x86/avx.rs @@ -2263,7 +2263,7 @@ pub unsafe fn _mm256_set_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { // This intrinsic has no corresponding instruction. 
#[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_pd(a: f64, b: f64, c: f64, d: f64) -> __m256d { - __m256d(a, b, c, d) + __m256d([a, b, c, d]) } /// Sets packed single-precision (32-bit) floating-point elements in returned @@ -2284,7 +2284,7 @@ pub unsafe fn _mm256_setr_ps( g: f32, h: f32, ) -> __m256 { - __m256(a, b, c, d, e, f, g, h) + __m256([a, b, c, d, e, f, g, h]) } /// Sets packed 8-bit integers in returned vector with the supplied values in @@ -2331,10 +2331,10 @@ pub unsafe fn _mm256_setr_epi8( ) -> __m256i { #[rustfmt::skip] transmute(i8x32::new( - e00, e01, e02, e03, e04, e05, e06, e07, + [e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, - e24, e25, e26, e27, e28, e29, e30, e31, + e24, e25, e26, e27, e28, e29, e30, e31] )) } @@ -2366,10 +2366,10 @@ pub unsafe fn _mm256_setr_epi16( ) -> __m256i { #[rustfmt::skip] transmute(i16x16::new( - e00, e01, e02, e03, + [e00, e01, e02, e03, e04, e05, e06, e07, e08, e09, e10, e11, - e12, e13, e14, e15, + e12, e13, e14, e15] )) } @@ -2391,7 +2391,7 @@ pub unsafe fn _mm256_setr_epi32( e6: i32, e7: i32, ) -> __m256i { - transmute(i32x8::new(e0, e1, e2, e3, e4, e5, e6, e7)) + transmute(i32x8::new([e0, e1, e2, e3, e4, e5, e6, e7])) } /// Sets packed 64-bit integers in returned vector with the supplied values in @@ -2403,7 +2403,7 @@ pub unsafe fn _mm256_setr_epi32( // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_setr_epi64x(a: i64, b: i64, c: i64, d: i64) -> __m256i { - transmute(i64x4::new(a, b, c, d)) + transmute(i64x4::new([a, b, c, d])) } /// Broadcasts double-precision (64-bit) floating-point value `a` to all @@ -2723,7 +2723,7 @@ pub unsafe fn _mm256_undefined_pd() -> __m256d { // This intrinsic has no corresponding instruction. #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm256_undefined_si256() -> __m256i { - __m256i(0, 0, 0, 0) + __m256i([0, 0, 0, 0]) } /// Sets packed __m256 returned vector with the supplied values. 
diff --git a/crates/core_arch/src/x86/avx512f.rs b/crates/core_arch/src/x86/avx512f.rs index 9b38a9f352..8f490988b7 100644 --- a/crates/core_arch/src/x86/avx512f.rs +++ b/crates/core_arch/src/x86/avx512f.rs @@ -15013,9 +15013,9 @@ pub unsafe fn _mm512_setr_epi32( e1: i32, e0: i32, ) -> __m512i { - let r = i32x16( + let r = i32x16([ e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, e2, e1, e0, - ); + ]); transmute(r) } @@ -15090,12 +15090,12 @@ pub unsafe fn _mm512_set_epi8( e1: i8, e0: i8, ) -> __m512i { - let r = i8x64( + let r = i8x64([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, e32, e33, e34, e35, e36, e37, e38, e39, e40, e41, e42, e43, e44, e45, e46, e47, e48, e49, e50, e51, e52, e53, e54, e55, e56, e57, e58, e59, e60, e61, e62, e63, - ); + ]); transmute(r) } @@ -15138,10 +15138,10 @@ pub unsafe fn _mm512_set_epi16( e1: i16, e0: i16, ) -> __m512i { - let r = i16x32( + let r = i16x32([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, e16, e17, e18, e19, e20, e21, e22, e23, e24, e25, e26, e27, e28, e29, e30, e31, - ); + ]); transmute(r) } @@ -15232,7 +15232,7 @@ pub unsafe fn _mm512_setr_epi64( e6: i64, e7: i64, ) -> __m512i { - let r = i64x8::new(e0, e1, e2, e3, e4, e5, e6, e7); + let r = i64x8::new([e0, e1, e2, e3, e4, e5, e6, e7]); transmute(r) } @@ -26207,9 +26207,9 @@ pub unsafe fn _mm512_setr_ps( e14: f32, e15: f32, ) -> __m512 { - let r = f32x16::new( + let r = f32x16::new([ e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, - ); + ]); transmute(r) } @@ -26437,7 +26437,7 @@ pub unsafe fn _mm_maskz_set1_epi64(k: __mmask8, a: i64) -> __m128i { #[inline] #[target_feature(enable = "avx512f")] pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { - let r = i64x8::new(d, c, b, a, d, c, b, a); + let r = i64x8::new([d, c, b, a, d, c, b, a]); transmute(r) } @@ -26447,7 +26447,7 @@ pub unsafe fn _mm512_set4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { #[inline] #[target_feature(enable = "avx512f")] pub unsafe fn _mm512_setr4_epi64(d: i64, c: i64, b: i64, a: i64) -> __m512i { - let r = i64x8::new(a, b, c, d, a, b, c, d); + let r = i64x8::new([a, b, c, d, a, b, c, d]); transmute(r) } @@ -32238,7 +32238,7 @@ pub unsafe fn _mm512_setr_pd( e6: f64, e7: f64, ) -> __m512d { - let r = f64x8::new(e0, e1, e2, e3, e4, e5, e6, e7); + let r = f64x8::new([e0, e1, e2, e3, e4, e5, e6, e7]); transmute(r) } diff --git a/crates/core_arch/src/x86/mod.rs b/crates/core_arch/src/x86/mod.rs index 37045e40e0..b9b2ce5c9b 100644 --- a/crates/core_arch/src/x86/mod.rs +++ b/crates/core_arch/src/x86/mod.rs @@ -48,7 +48,7 @@ types! { /// # } /// ``` #[stable(feature = "simd_x86", since = "1.27.0")] - pub struct __m128i(i64, i64); + pub struct __m128i([i64; 2]); /// 128-bit wide set of four `f32` types, x86-specific /// @@ -85,7 +85,7 @@ types! { /// # } /// ``` #[stable(feature = "simd_x86", since = "1.27.0")] - pub struct __m128(f32, f32, f32, f32); + pub struct __m128([f32; 4]); /// 128-bit wide set of two `f64` types, x86-specific /// @@ -122,7 +122,7 @@ types! { /// # } /// ``` #[stable(feature = "simd_x86", since = "1.27.0")] - pub struct __m128d(f64, f64); + pub struct __m128d([f64; 2]); /// 256-bit wide integer vector type, x86-specific /// @@ -163,7 +163,7 @@ types! 
{ /// # } /// ``` #[stable(feature = "simd_x86", since = "1.27.0")] - pub struct __m256i(i64, i64, i64, i64); + pub struct __m256i([i64; 4]); /// 256-bit wide set of eight `f32` types, x86-specific /// @@ -200,7 +200,7 @@ types! { /// # } /// ``` #[stable(feature = "simd_x86", since = "1.27.0")] - pub struct __m256(f32, f32, f32, f32, f32, f32, f32, f32); + pub struct __m256([f32; 8]); /// 256-bit wide set of four `f64` types, x86-specific /// @@ -237,7 +237,7 @@ types! { /// # } /// ``` #[stable(feature = "simd_x86", since = "1.27.0")] - pub struct __m256d(f64, f64, f64, f64); + pub struct __m256d([f64; 4]); /// 512-bit wide integer vector type, x86-specific /// @@ -258,7 +258,7 @@ types! { /// /// Note that this means that an instance of `__m512i` typically just means /// a "bag of bits" which is left up to interpretation at the point of use. - pub struct __m512i(i64, i64, i64, i64, i64, i64, i64, i64); + pub struct __m512i([i64; 8]); /// 512-bit wide set of sixteen `f32` types, x86-specific /// @@ -275,10 +275,7 @@ types! { /// Most intrinsics using `__m512` are prefixed with `_mm512_` and are /// suffixed with "ps" (or otherwise contain "ps"). Not to be confused with /// "pd" which is used for `__m512d`. - pub struct __m512( - f32, f32, f32, f32, f32, f32, f32, f32, - f32, f32, f32, f32, f32, f32, f32, f32, - ); + pub struct __m512([f32; 16]); /// 512-bit wide set of eight `f64` types, x86-specific /// @@ -295,14 +292,14 @@ types! { /// Most intrinsics using `__m512d` are prefixed with `_mm512_` and are /// suffixed with "pd" (or otherwise contain "pd"). Not to be confused with /// "ps" which is used for `__m512`. - pub struct __m512d(f64, f64, f64, f64, f64, f64, f64, f64); + pub struct __m512d([f64; 8]); /// 128-bit wide set of eight 'u16' types, x86-specific /// /// This type is representing a 128-bit SIMD register which internally is consisted of /// eight packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. - pub struct __m128bh(u16, u16, u16, u16, u16, u16, u16, u16); + pub struct __m128bh([u16; 8]); /// 256-bit wide set of 16 'u16' types, x86-specific /// @@ -310,10 +307,7 @@ types! { /// representing a 256-bit SIMD register which internally is consisted of /// 16 packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. - pub struct __m256bh( - u16, u16, u16, u16, u16, u16, u16, u16, - u16, u16, u16, u16, u16, u16, u16, u16 - ); + pub struct __m256bh([u16; 16]); /// 512-bit wide set of 32 'u16' types, x86-specific /// @@ -321,12 +315,7 @@ types! { /// representing a 512-bit SIMD register which internally is consisted of /// 32 packed `u16` instances. Its purpose is for bf16 related intrinsic /// implementations. - pub struct __m512bh( - u16, u16, u16, u16, u16, u16, u16, u16, - u16, u16, u16, u16, u16, u16, u16, u16, - u16, u16, u16, u16, u16, u16, u16, u16, - u16, u16, u16, u16, u16, u16, u16, u16 - ); + pub struct __m512bh([u16; 32]); } /// The `__mmask64` type used in AVX-512 intrinsics, a 64-bit integer diff --git a/crates/core_arch/src/x86/sse.rs b/crates/core_arch/src/x86/sse.rs index 3d4471ba36..e33b955ea7 100644 --- a/crates/core_arch/src/x86/sse.rs +++ b/crates/core_arch/src/x86/sse.rs @@ -893,7 +893,7 @@ pub unsafe fn _mm_cvt_si2ss(a: __m128, b: i32) -> __m128 { #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_set_ss(a: f32) -> __m128 { - __m128(a, 0.0, 0.0, 0.0) + __m128([a, 0.0, 0.0, 0.0]) } /// Construct a `__m128` with all element set to `a`. 
@@ -904,7 +904,7 @@ pub unsafe fn _mm_set_ss(a: f32) -> __m128 { #[cfg_attr(test, assert_instr(shufps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_set1_ps(a: f32) -> __m128 { - __m128(a, a, a, a) + __m128([a, a, a, a]) } /// Alias for [`_mm_set1_ps`](fn._mm_set1_ps.html) @@ -942,7 +942,7 @@ pub unsafe fn _mm_set_ps1(a: f32) -> __m128 { #[cfg_attr(test, assert_instr(unpcklps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { - __m128(d, c, b, a) + __m128([d, c, b, a]) } /// Construct a `__m128` from four floating point values lowest to highest. @@ -968,7 +968,7 @@ pub unsafe fn _mm_set_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { )] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { - __m128(a, b, c, d) + __m128([a, b, c, d]) } /// Construct a `__m128` with all elements initialized to zero. @@ -979,7 +979,7 @@ pub unsafe fn _mm_setr_ps(a: f32, b: f32, c: f32, d: f32) -> __m128 { #[cfg_attr(test, assert_instr(xorps))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_setzero_ps() -> __m128 { - __m128(0.0, 0.0, 0.0, 0.0) + __m128([0.0, 0.0, 0.0, 0.0]) } /// A utility function for creating masks to use with Intel shuffle and @@ -1097,7 +1097,7 @@ pub unsafe fn _mm_movemask_ps(a: __m128) -> i32 { #[cfg_attr(test, assert_instr(movss))] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_load_ss(p: *const f32) -> __m128 { - __m128(*p, 0.0, 0.0, 0.0) + __m128([*p, 0.0, 0.0, 0.0]) } /// Construct a `__m128` by duplicating the value read from `p` into all @@ -1113,7 +1113,7 @@ pub unsafe fn _mm_load_ss(p: *const f32) -> __m128 { #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_load1_ps(p: *const f32) -> __m128 { let a = *p; - __m128(a, a, a, a) + __m128([a, a, a, a]) } /// Alias for [`_mm_load1_ps`](fn._mm_load1_ps.html) @@ -1210,7 +1210,7 @@ pub unsafe fn _mm_loadr_ps(p: *const f32) -> __m128 { #[target_feature(enable = "sse")] #[stable(feature = "simd_x86_mm_loadu_si64", since = "1.46.0")] pub unsafe fn _mm_loadu_si64(mem_addr: *const u8) -> __m128i { - transmute(i64x2(ptr::read_unaligned(mem_addr as *const i64), 0)) + transmute(i64x2([ptr::read_unaligned(mem_addr as *const i64), 0])) } /// Stores the lowest 32 bit float of `a` into memory. diff --git a/crates/core_arch/src/x86/sse2.rs b/crates/core_arch/src/x86/sse2.rs index f4fdb50469..b51c855116 100644 --- a/crates/core_arch/src/x86/sse2.rs +++ b/crates/core_arch/src/x86/sse2.rs @@ -951,7 +951,7 @@ pub unsafe fn _mm_cvtps_epi32(a: __m128) -> __m128i { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_cvtsi32_si128(a: i32) -> __m128i { - transmute(i32x4::new(a, 0, 0, 0)) + transmute(i32x4::new([a, 0, 0, 0])) } /// Returns the lowest element of `a`. @@ -973,7 +973,7 @@ pub unsafe fn _mm_cvtsi128_si32(a: __m128i) -> i32 { // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_set_epi64x(e1: i64, e0: i64) -> __m128i { - transmute(i64x2::new(e0, e1)) + transmute(i64x2::new([e0, e1])) } /// Sets packed 32-bit integers with the supplied values. 
@@ -984,7 +984,7 @@ pub unsafe fn _mm_set_epi64x(e1: i64, e0: i64) -> __m128i { // no particular instruction to test #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_set_epi32(e3: i32, e2: i32, e1: i32, e0: i32) -> __m128i { - transmute(i32x4::new(e0, e1, e2, e3)) + transmute(i32x4::new([e0, e1, e2, e3])) } /// Sets packed 16-bit integers with the supplied values. @@ -1004,7 +1004,7 @@ pub unsafe fn _mm_set_epi16( e1: i16, e0: i16, ) -> __m128i { - transmute(i16x8::new(e0, e1, e2, e3, e4, e5, e6, e7)) + transmute(i16x8::new([e0, e1, e2, e3, e4, e5, e6, e7])) } /// Sets packed 8-bit integers with the supplied values. @@ -1034,7 +1034,7 @@ pub unsafe fn _mm_set_epi8( ) -> __m128i { #[rustfmt::skip] transmute(i8x16::new( - e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15, + [e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12, e13, e14, e15] )) } @@ -2371,7 +2371,7 @@ pub unsafe fn _mm_set_pd1(a: f64) -> __m128d { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_set_pd(a: f64, b: f64) -> __m128d { - __m128d(b, a) + __m128d([b, a]) } /// Sets packed double-precision (64-bit) floating-point elements in the return @@ -2746,7 +2746,7 @@ pub unsafe fn _mm_castsi128_ps(a: __m128i) -> __m128 { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_undefined_pd() -> __m128d { - __m128d(0.0, 0.0) + __m128d([0.0, 0.0]) } /// Returns vector of type __m128i with indeterminate elements. @@ -2758,7 +2758,7 @@ pub unsafe fn _mm_undefined_pd() -> __m128d { #[target_feature(enable = "sse2")] #[stable(feature = "simd_x86", since = "1.27.0")] pub unsafe fn _mm_undefined_si128() -> __m128i { - __m128i(0, 0) + __m128i([0, 0]) } /// The resulting `__m128d` element is composed by the low-order values of From f2d8f7c405b6ab520e6550221f865c1b15e73f1c Mon Sep 17 00:00:00 2001 From: Ezra Shaw Date: Sat, 6 May 2023 12:24:59 +1200 Subject: [PATCH 2/4] move to array-simd for more architectures --- crates/core_arch/src/aarch64/neon/mod.rs | 8 +-- crates/core_arch/src/arm/dsp.rs | 4 +- crates/core_arch/src/arm/simd32.rs | 4 +- crates/core_arch/src/arm_shared/neon/mod.rs | 75 ++++++++------------- crates/core_arch/src/mips/msa.rs | 42 +++--------- crates/core_arch/src/powerpc/altivec.rs | 23 +++---- crates/core_arch/src/powerpc/vsx.rs | 8 +-- 7 files changed, 60 insertions(+), 104 deletions(-) diff --git a/crates/core_arch/src/aarch64/neon/mod.rs b/crates/core_arch/src/aarch64/neon/mod.rs index 8506570337..c5f941d3dc 100644 --- a/crates/core_arch/src/aarch64/neon/mod.rs +++ b/crates/core_arch/src/aarch64/neon/mod.rs @@ -21,10 +21,10 @@ use stdarch_test::assert_instr; types! { /// ARM-specific 64-bit wide vector of one packed `f64`. #[stable(feature = "neon_intrinsics", since = "1.59.0")] - pub struct float64x1_t(f64); // FIXME: check this! + pub struct float64x1_t([f64; 1]); // FIXME: check this! /// ARM-specific 128-bit wide vector of two packed `f64`. #[stable(feature = "neon_intrinsics", since = "1.59.0")] - pub struct float64x2_t(f64, f64); + pub struct float64x2_t([f64; 2]); } /// ARM-specific type containing two `float64x1_t` vectors. 
@@ -1980,7 +1980,7 @@ pub unsafe fn vdup_n_p64(value: p64) -> poly64x1_t { #[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdup_n_f64(value: f64) -> float64x1_t { - float64x1_t(value) + float64x1_t([value]) } /// Duplicate vector element to vector or scalar @@ -1998,7 +1998,7 @@ pub unsafe fn vdupq_n_p64(value: p64) -> poly64x2_t { #[cfg_attr(test, assert_instr(dup))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vdupq_n_f64(value: f64) -> float64x2_t { - float64x2_t(value, value) + float64x2_t([value, value]) } /// Duplicate vector element to vector or scalar diff --git a/crates/core_arch/src/arm/dsp.rs b/crates/core_arch/src/arm/dsp.rs index 6720f97a53..670742c002 100644 --- a/crates/core_arch/src/arm/dsp.rs +++ b/crates/core_arch/src/arm/dsp.rs @@ -27,9 +27,9 @@ use crate::mem::transmute; types! { /// ARM-specific 32-bit wide vector of two packed `i16`. - pub struct int16x2_t(i16, i16); + pub struct int16x2_t([i16; 2]); /// ARM-specific 32-bit wide vector of two packed `u16`. - pub struct uint16x2_t(u16, u16); + pub struct uint16x2_t([u16; 2]); } extern "unadjusted" { diff --git a/crates/core_arch/src/arm/simd32.rs b/crates/core_arch/src/arm/simd32.rs index 2d867acc83..74e1e92a9b 100644 --- a/crates/core_arch/src/arm/simd32.rs +++ b/crates/core_arch/src/arm/simd32.rs @@ -69,9 +69,9 @@ use crate::{core_arch::arm::dsp::int16x2_t, mem::transmute}; types! { /// ARM-specific 32-bit wide vector of four packed `i8`. - pub struct int8x4_t(i8, i8, i8, i8); + pub struct int8x4_t([i8; 4]); /// ARM-specific 32-bit wide vector of four packed `u8`. - pub struct uint8x4_t(u8, u8, u8, u8); + pub struct uint8x4_t([u8; 4]); } macro_rules! dsp_call { diff --git a/crates/core_arch/src/arm_shared/neon/mod.rs b/crates/core_arch/src/arm_shared/neon/mod.rs index 8a8f4febf6..21abdca815 100644 --- a/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/crates/core_arch/src/arm_shared/neon/mod.rs @@ -19,90 +19,81 @@ pub(crate) type p128 = u128; types! { /// ARM-specific 64-bit wide vector of eight packed `i8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int8x8_t(pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8); + pub struct int8x8_t(pub(crate) [i8; 8]); /// ARM-specific 64-bit wide vector of eight packed `u8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint8x8_t(pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8); + pub struct uint8x8_t(pub(crate) [u8; 8]); /// ARM-specific 64-bit wide polynomial vector of eight packed `p8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct poly8x8_t(pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8); + pub struct poly8x8_t(pub(crate) [p8; 8]); /// ARM-specific 64-bit wide vector of four packed `i16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int16x4_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); + pub struct int16x4_t(pub(crate) [i16; 4]); /// ARM-specific 64-bit wide vector of four packed `u16`. 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint16x4_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); + pub struct uint16x4_t(pub(crate) [u16; 4]); // FIXME: ARM-specific 64-bit wide vector of four packed `f16`. // pub struct float16x4_t(f16, f16, f16, f16); /// ARM-specific 64-bit wide vector of four packed `p16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct poly16x4_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); + pub struct poly16x4_t(pub(crate) [p16; 4]); /// ARM-specific 64-bit wide vector of two packed `i32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int32x2_t(pub(crate) i32, pub(crate) i32); + pub struct int32x2_t(pub(crate) [i32; 2]); /// ARM-specific 64-bit wide vector of two packed `u32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint32x2_t(pub(crate) u32, pub(crate) u32); + pub struct uint32x2_t(pub(crate) [u32; 2]); /// ARM-specific 64-bit wide vector of two packed `f32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct float32x2_t(pub(crate) f32, pub(crate) f32); + pub struct float32x2_t(pub(crate) [f32; 2]); /// ARM-specific 64-bit wide vector of one packed `i64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int64x1_t(pub(crate) i64); + pub struct int64x1_t(pub(crate) [i64; 1]); /// ARM-specific 64-bit wide vector of one packed `u64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint64x1_t(pub(crate) u64); + pub struct uint64x1_t(pub(crate) [u64; 1]); /// ARM-specific 64-bit wide vector of one packed `p64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct poly64x1_t(pub(crate) p64); + pub struct poly64x1_t(pub(crate) [p64; 1]); /// ARM-specific 128-bit wide vector of sixteen packed `i8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int8x16_t( - pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, - pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8, pub(crate) i8 , pub(crate) i8, pub(crate) i8, - ); + pub struct int8x16_t(pub(crate) [i8; 16]); /// ARM-specific 128-bit wide vector of sixteen packed `u8`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint8x16_t( - pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, - pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, pub(crate) u8, pub(crate) u8 , pub(crate) u8, pub(crate) u8, - ); + pub struct uint8x16_t(pub(crate) [u8; 16]); /// ARM-specific 128-bit wide vector of sixteen packed `p8`. 
#[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct poly8x16_t( - pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, - pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, pub(crate) p8, - ); + pub struct poly8x16_t(pub(crate) [p8; 16]); /// ARM-specific 128-bit wide vector of eight packed `i16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int16x8_t(pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16, pub(crate) i16); + pub struct int16x8_t(pub(crate) [i16; 8]); /// ARM-specific 128-bit wide vector of eight packed `u16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint16x8_t(pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16, pub(crate) u16); + pub struct uint16x8_t(pub(crate) [u16; 8]); // FIXME: ARM-specific 128-bit wide vector of eight packed `f16`. // pub struct float16x8_t(f16, f16, f16, f16, f16, f16, f16); /// ARM-specific 128-bit wide vector of eight packed `p16`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct poly16x8_t(pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16, pub(crate) p16); + pub struct poly16x8_t(pub(crate) [p16; 8]); /// ARM-specific 128-bit wide vector of four packed `i32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int32x4_t(pub(crate) i32, pub(crate) i32, pub(crate) i32, pub(crate) i32); + pub struct int32x4_t(pub(crate) [i32; 4]); /// ARM-specific 128-bit wide vector of four packed `u32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint32x4_t(pub(crate) u32, pub(crate) u32, pub(crate) u32, pub(crate) u32); + pub struct uint32x4_t(pub(crate) [u32; 4]); /// ARM-specific 128-bit wide vector of four packed `f32`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct float32x4_t(pub(crate) f32, pub(crate) f32, pub(crate) f32, pub(crate) f32); + pub struct float32x4_t(pub(crate) [f32; 4]); /// ARM-specific 128-bit wide vector of two packed `i64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct int64x2_t(pub(crate) i64, pub(crate) i64); + pub struct int64x2_t(pub(crate) [i64; 2]); /// ARM-specific 128-bit wide vector of two packed `u64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct uint64x2_t(pub(crate) u64, pub(crate) u64); + pub struct uint64x2_t(pub(crate) [u64; 2]); /// ARM-specific 128-bit wide vector of two packed `p64`. #[cfg_attr(not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0"))] - pub struct poly64x2_t(pub(crate) p64, pub(crate) p64); + pub struct poly64x2_t(pub(crate) [p64; 2]); } /// ARM-specific type containing two `int8x8_t` vectors. 
@@ -738,12 +729,7 @@ pub struct poly64x1x3_t(pub poly64x1_t, pub poly64x1_t, pub poly64x1_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub struct poly64x1x4_t( - pub poly64x1_t, - pub poly64x1_t, - pub poly64x1_t, - pub poly64x1_t, -); +pub struct poly64x1x4_t(pub [poly64x1_t; 4]); /// ARM-specific type containing four `poly64x2_t` vectors. #[repr(C)] @@ -752,7 +738,7 @@ pub struct poly64x1x4_t( not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t); +pub struct poly64x2x2_t(pub [poly64x2_t; 2]); /// ARM-specific type containing four `poly64x2_t` vectors. #[repr(C)] #[derive(Copy, Clone, Debug)] @@ -760,7 +746,7 @@ pub struct poly64x2x2_t(pub poly64x2_t, pub poly64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t); +pub struct poly64x2x3_t(pub [poly64x2_t; 3]); /// ARM-specific type containing four `poly64x2_t` vectors. #[repr(C)] #[derive(Copy, Clone, Debug)] @@ -768,12 +754,7 @@ pub struct poly64x2x3_t(pub poly64x2_t, pub poly64x2_t, pub poly64x2_t); not(target_arch = "arm"), stable(feature = "neon_intrinsics", since = "1.59.0") )] -pub struct poly64x2x4_t( - pub poly64x2_t, - pub poly64x2_t, - pub poly64x2_t, - pub poly64x2_t, -); +pub struct poly64x2x4_t(pub [poly64x2_t; 4]); #[allow(improper_ctypes)] extern "unadjusted" { diff --git a/crates/core_arch/src/mips/msa.rs b/crates/core_arch/src/mips/msa.rs index 3e93db85e2..67a13de1e8 100644 --- a/crates/core_arch/src/mips/msa.rs +++ b/crates/core_arch/src/mips/msa.rs @@ -12,56 +12,34 @@ use crate::mem; types! { // / MIPS-specific 128-bit wide vector of 16 packed `i8`. - pub struct v16i8( - i8, i8, i8, i8, i8, i8, i8, i8, - i8, i8, i8, i8, i8, i8, i8, i8, - ); + pub struct v16i8([i8; 16]); // / MIPS-specific 128-bit wide vector of 8 packed `i16`. - pub struct v8i16( - i16, i16, i16, i16, i16, i16, i16, i16, - ); + pub struct v8i16([i16; 8]); // / MIPS-specific 128-bit wide vector of 4 packed `i32`. - pub struct v4i32( - i32, i32, i32, i32, - ); + pub struct v4i32([i32; 4]); // / MIPS-specific 128-bit wide vector of 2 packed `i64`. - pub struct v2i64( - i64, i64, - ); + pub struct v2i64([i64; 2]); // / MIPS-specific 128-bit wide vector of 16 packed `u8`. - pub struct v16u8( - u8, u8, u8, u8, u8, u8, u8, u8, - u8, u8, u8, u8, u8, u8, u8, u8, - ); + pub struct v16u8([u8; 16]); // / MIPS-specific 128-bit wide vector of 8 packed `u16`. - pub struct v8u16( - u16, u16, u16, u16, u16, u16, u16, u16, - ); + pub struct v8u16([u16; 8]); // / MIPS-specific 128-bit wide vector of 4 packed `u32`. - pub struct v4u32( - u32, u32, u32, u32, - ); + pub struct v4u32([u32; 4]); // / MIPS-specific 128-bit wide vector of 2 packed `u64`. - pub struct v2u64( - u64, u64, - ); + pub struct v2u64([u64; 2]); // / MIPS-specific 128-bit wide vector of 4 packed `f32`. - pub struct v4f32( - f32, f32, f32, f32, - ); + pub struct v4f32([f32; 4]); // / MIPS-specific 128-bit wide vector of 2 packed `f64`. - pub struct v2f64( - f64, f64, - ); + pub struct v2f64([f64; 2]); } #[allow(improper_ctypes)] diff --git a/crates/core_arch/src/powerpc/altivec.rs b/crates/core_arch/src/powerpc/altivec.rs index 8f6daf13f1..396b2afe6b 100644 --- a/crates/core_arch/src/powerpc/altivec.rs +++ b/crates/core_arch/src/powerpc/altivec.rs @@ -23,30 +23,27 @@ use stdarch_test::assert_instr; types! 
{ /// PowerPC-specific 128-bit wide vector of sixteen packed `i8` - pub struct vector_signed_char(i8, i8, i8, i8, i8, i8, i8, i8, - i8, i8, i8, i8, i8, i8, i8, i8); + pub struct vector_signed_char([i8; 16]); /// PowerPC-specific 128-bit wide vector of sixteen packed `u8` - pub struct vector_unsigned_char(u8, u8, u8, u8, u8, u8, u8, u8, - u8, u8, u8, u8, u8, u8, u8, u8); + pub struct vector_unsigned_char([u8; 16]); /// PowerPC-specific 128-bit wide vector mask of sixteen packed elements - pub struct vector_bool_char(i8, i8, i8, i8, i8, i8, i8, i8, - i8, i8, i8, i8, i8, i8, i8, i8); + pub struct vector_bool_char([i8; 16]); /// PowerPC-specific 128-bit wide vector of eight packed `i16` - pub struct vector_signed_short(i16, i16, i16, i16, i16, i16, i16, i16); + pub struct vector_signed_short([i16; 8]); /// PowerPC-specific 128-bit wide vector of eight packed `u16` - pub struct vector_unsigned_short(u16, u16, u16, u16, u16, u16, u16, u16); + pub struct vector_unsigned_short([u16; 8]); /// PowerPC-specific 128-bit wide vector mask of eight packed elements - pub struct vector_bool_short(i16, i16, i16, i16, i16, i16, i16, i16); + pub struct vector_bool_short([i16; 8]); // pub struct vector_pixel(???); /// PowerPC-specific 128-bit wide vector of four packed `i32` - pub struct vector_signed_int(i32, i32, i32, i32); + pub struct vector_signed_int([i32; 4]); /// PowerPC-specific 128-bit wide vector of four packed `u32` - pub struct vector_unsigned_int(u32, u32, u32, u32); + pub struct vector_unsigned_int([u32; 4]); /// PowerPC-specific 128-bit wide vector mask of four packed elements - pub struct vector_bool_int(i32, i32, i32, i32); + pub struct vector_bool_int([i32; 4]); /// PowerPC-specific 128-bit wide vector of four packed `f32` - pub struct vector_float(f32, f32, f32, f32); + pub struct vector_float([f32; 4]); } #[allow(improper_ctypes)] diff --git a/crates/core_arch/src/powerpc/vsx.rs b/crates/core_arch/src/powerpc/vsx.rs index f2ebc23b21..bfb8057796 100644 --- a/crates/core_arch/src/powerpc/vsx.rs +++ b/crates/core_arch/src/powerpc/vsx.rs @@ -18,13 +18,13 @@ use crate::mem::transmute; types! { // pub struct vector_Float16 = f16x8; /// PowerPC-specific 128-bit wide vector of two packed `i64` - pub struct vector_signed_long(i64, i64); + pub struct vector_signed_long([i64; 2]); /// PowerPC-specific 128-bit wide vector of two packed `u64` - pub struct vector_unsigned_long(u64, u64); + pub struct vector_unsigned_long([u64; 2]); /// PowerPC-specific 128-bit wide vector mask of two `i64` - pub struct vector_bool_long(i64, i64); + pub struct vector_bool_long([i64; 2]); /// PowerPC-specific 128-bit wide vector of two packed `f64` - pub struct vector_double(f64, f64); + pub struct vector_double([f64; 2]); // pub struct vector_signed_long_long = vector_signed_long; // pub struct vector_unsigned_long_long = vector_unsigned_long; // pub struct vector_bool_long_long = vector_bool_long; From f73cac88b1f6426cfd4335ad9e11d58ca93c8171 Mon Sep 17 00:00:00 2001 From: Ezra Shaw Date: Sat, 6 May 2023 12:32:19 +1200 Subject: [PATCH 3/4] fix types macro --- crates/core_arch/src/macros.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/core_arch/src/macros.rs b/crates/core_arch/src/macros.rs index a75cb4ec2b..bbeb31ceb6 100644 --- a/crates/core_arch/src/macros.rs +++ b/crates/core_arch/src/macros.rs @@ -52,14 +52,14 @@ macro_rules! static_assert_simm_bits { macro_rules! 
types { ($( $(#[$doc:meta])* - pub struct $name:ident($field:tt); + pub struct $name:ident($( $field:tt )+); )*) => ($( $(#[$doc])* #[derive(Copy, Clone, Debug)] #[allow(non_camel_case_types)] #[repr(simd)] #[allow(clippy::missing_inline_in_public_items)] - pub struct $name($field); + pub struct $name($( $field )+); )*) } From e10c1ade868f974d6752af628c5d80de2c029dea Mon Sep 17 00:00:00 2001 From: Ezra Shaw Date: Sat, 6 May 2023 12:43:09 +1200 Subject: [PATCH 4/4] more fixing --- .../core_arch/src/aarch64/neon/generated.rs | 178 +++--- .../src/arm_shared/neon/generated.rs | 588 +++++++++--------- crates/stdarch-gen/src/main.rs | 4 +- 3 files changed, 385 insertions(+), 385 deletions(-) diff --git a/crates/core_arch/src/aarch64/neon/generated.rs b/crates/core_arch/src/aarch64/neon/generated.rs index da7fdf8b1f..62abacaf27 100644 --- a/crates/core_arch/src/aarch64/neon/generated.rs +++ b/crates/core_arch/src/aarch64/neon/generated.rs @@ -926,7 +926,7 @@ pub unsafe fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64) #[inline] @@ -937,7 +937,7 @@ pub unsafe fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64) #[inline] @@ -21106,7 +21106,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_f64_x2() { let a: [f64; 5] = [0., 1., 2., 3., 4.]; - let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(3., 4.)]; + let e: [f64x2; 2] = [f64x2::new([1., 2.]), f64x2::new([3., 4.])]; let r: [f64x2; 2] = transmute(vld1q_f64_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21122,7 +21122,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_f64_x3() { let a: [f64; 7] = [0., 1., 2., 3., 4., 5., 6.]; - let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.)]; + let e: [f64x2; 3] = [f64x2::new([1., 2.]), f64x2::new([3., 4.]), f64x2::new([5., 6.])]; let r: [f64x2; 3] = transmute(vld1q_f64_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21138,7 +21138,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_f64_x4() { let a: [f64; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; - let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(3., 4.), f64x2::new(5., 6.), f64x2::new(7., 8.)]; + let e: [f64x2; 4] = [f64x2::new([1., 2.]), f64x2::new([3., 4.]), f64x2::new([5., 6.]), f64x2::new([7., 8.])]; let r: [f64x2; 4] = transmute(vld1q_f64_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21146,7 +21146,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_s64() { let a: [i64; 5] = [0, 1, 2, 2, 3]; - let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 3)]; + let e: [i64x2; 2] = [i64x2::new([1, 2]), i64x2::new([2, 3])]; let r: [i64x2; 2] = transmute(vld2q_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21154,7 +21154,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_u64() { let a: [u64; 5] = [0, 1, 2, 2, 3]; - let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(2, 3)]; + let e: [u64x2; 2] = [u64x2::new([1, 2]), u64x2::new([2, 3])]; let r: [u64x2; 2] = transmute(vld2q_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21162,7 +21162,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_p64() { let a: [u64; 5] = [0, 1, 2, 2, 3]; - let e: [i64x2; 2] = [i64x2::new(1, 2), 
i64x2::new(2, 3)]; + let e: [i64x2; 2] = [i64x2::new([1, 2]), i64x2::new([2, 3])]; let r: [i64x2; 2] = transmute(vld2q_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21178,7 +21178,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_f64() { let a: [f64; 5] = [0., 1., 2., 2., 3.]; - let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(2., 3.)]; + let e: [f64x2; 2] = [f64x2::new([1., 2.]), f64x2::new([2., 3.])]; let r: [f64x2; 2] = transmute(vld2q_f64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21186,7 +21186,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_s64() { let a: [i64; 5] = [0, 1, 1, 2, 3]; - let e: [i64x2; 2] = [i64x2::new(1, 1), i64x2::new(1, 1)]; + let e: [i64x2; 2] = [i64x2::new([1, 1]), i64x2::new([1, 1])]; let r: [i64x2; 2] = transmute(vld2q_dup_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21194,7 +21194,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_u64() { let a: [u64; 5] = [0, 1, 1, 2, 3]; - let e: [u64x2; 2] = [u64x2::new(1, 1), u64x2::new(1, 1)]; + let e: [u64x2; 2] = [u64x2::new([1, 1]), u64x2::new([1, 1])]; let r: [u64x2; 2] = transmute(vld2q_dup_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21202,7 +21202,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_p64() { let a: [u64; 5] = [0, 1, 1, 2, 3]; - let e: [i64x2; 2] = [i64x2::new(1, 1), i64x2::new(1, 1)]; + let e: [i64x2; 2] = [i64x2::new([1, 1]), i64x2::new([1, 1])]; let r: [i64x2; 2] = transmute(vld2q_dup_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21218,7 +21218,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_f64() { let a: [f64; 5] = [0., 1., 1., 2., 3.]; - let e: [f64x2; 2] = [f64x2::new(1., 1.), f64x2::new(1., 1.)]; + let e: [f64x2; 2] = [f64x2::new([1., 1.]), f64x2::new([1., 1.])]; let r: [f64x2; 2] = transmute(vld2q_dup_f64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21226,8 +21226,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_s8() { let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x16; 2] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i8x16; 2] = [i8x16::new([0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i8x16; 2] = [i8x16::new([1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26])]; let r: [i8x16; 2] = transmute(vld2q_lane_s8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21235,8 +21235,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_s64() { let a: [i64; 3] = [0, 1, 2]; - let b: [i64x1; 2] = [i64x1::new(0), i64x1::new(2)]; - let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)]; + let b: [i64x1; 2] = [i64x1::new([0]), i64x1::new([2])]; + let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([2])]; let r: [i64x1; 2] = transmute(vld2_lane_s64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21244,8 +21244,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_s64() { let a: [i64; 5] = [0, 1, 2, 3, 4]; - let b: [i64x2; 2] = [i64x2::new(0, 2), i64x2::new(2, 14)]; - let e: [i64x2; 
2] = [i64x2::new(1, 2), i64x2::new(2, 14)]; + let b: [i64x2; 2] = [i64x2::new([0, 2]), i64x2::new([2, 14])]; + let e: [i64x2; 2] = [i64x2::new([1, 2]), i64x2::new([2, 14])]; let r: [i64x2; 2] = transmute(vld2q_lane_s64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21253,8 +21253,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_p64() { let a: [u64; 3] = [0, 1, 2]; - let b: [i64x1; 2] = [i64x1::new(0), i64x1::new(2)]; - let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)]; + let b: [i64x1; 2] = [i64x1::new([0]), i64x1::new([2])]; + let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([2])]; let r: [i64x1; 2] = transmute(vld2_lane_p64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21262,8 +21262,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_p64() { let a: [u64; 5] = [0, 1, 2, 3, 4]; - let b: [i64x2; 2] = [i64x2::new(0, 2), i64x2::new(2, 14)]; - let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(2, 14)]; + let b: [i64x2; 2] = [i64x2::new([0, 2]), i64x2::new([2, 14])]; + let e: [i64x2; 2] = [i64x2::new([1, 2]), i64x2::new([2, 14])]; let r: [i64x2; 2] = transmute(vld2q_lane_p64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21271,8 +21271,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_u8() { let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u8x16; 2] = [u8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)]; - let e: [u8x16; 2] = [u8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)]; + let b: [u8x16; 2] = [u8x16::new([0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26])]; + let e: [u8x16; 2] = [u8x16::new([1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26])]; let r: [u8x16; 2] = transmute(vld2q_lane_u8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21280,8 +21280,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_u64() { let a: [u64; 3] = [0, 1, 2]; - let b: [u64x1; 2] = [u64x1::new(0), u64x1::new(2)]; - let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)]; + let b: [u64x1; 2] = [u64x1::new([0]), u64x1::new([2])]; + let e: [u64x1; 2] = [u64x1::new([1]), u64x1::new([2])]; let r: [u64x1; 2] = transmute(vld2_lane_u64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21289,8 +21289,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_u64() { let a: [u64; 5] = [0, 1, 2, 3, 4]; - let b: [u64x2; 2] = [u64x2::new(0, 2), u64x2::new(2, 14)]; - let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(2, 14)]; + let b: [u64x2; 2] = [u64x2::new([0, 2]), u64x2::new([2, 14])]; + let e: [u64x2; 2] = [u64x2::new([1, 2]), u64x2::new([2, 14])]; let r: [u64x2; 2] = transmute(vld2q_lane_u64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21298,8 +21298,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_p8() { let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x16; 2] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 
26)]; - let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i8x16; 2] = [i8x16::new([0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i8x16; 2] = [i8x16::new([1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26])]; let r: [i8x16; 2] = transmute(vld2q_lane_p8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21316,8 +21316,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_f64() { let a: [f64; 5] = [0., 1., 2., 3., 4.]; - let b: [f64x2; 2] = [f64x2::new(0., 2.), f64x2::new(2., 14.)]; - let e: [f64x2; 2] = [f64x2::new(1., 2.), f64x2::new(2., 14.)]; + let b: [f64x2; 2] = [f64x2::new([0., 2.]), f64x2::new([2., 14.])]; + let e: [f64x2; 2] = [f64x2::new([1., 2.]), f64x2::new([2., 14.])]; let r: [f64x2; 2] = transmute(vld2q_lane_f64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21325,7 +21325,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_s64() { let a: [i64; 7] = [0, 1, 2, 2, 2, 4, 4]; - let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 4), i64x2::new(2, 4)]; + let e: [i64x2; 3] = [i64x2::new([1, 2]), i64x2::new([2, 4]), i64x2::new([2, 4])]; let r: [i64x2; 3] = transmute(vld3q_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21333,7 +21333,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_u64() { let a: [u64; 7] = [0, 1, 2, 2, 2, 4, 4]; - let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(2, 4), u64x2::new(2, 4)]; + let e: [u64x2; 3] = [u64x2::new([1, 2]), u64x2::new([2, 4]), u64x2::new([2, 4])]; let r: [u64x2; 3] = transmute(vld3q_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21341,7 +21341,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_p64() { let a: [u64; 7] = [0, 1, 2, 2, 2, 4, 4]; - let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 4), i64x2::new(2, 4)]; + let e: [i64x2; 3] = [i64x2::new([1, 2]), i64x2::new([2, 4]), i64x2::new([2, 4])]; let r: [i64x2; 3] = transmute(vld3q_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21357,7 +21357,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_f64() { let a: [f64; 7] = [0., 1., 2., 2., 2., 4., 4.]; - let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(2., 4.), f64x2::new(2., 4.)]; + let e: [f64x2; 3] = [f64x2::new([1., 2.]), f64x2::new([2., 4.]), f64x2::new([2., 4.])]; let r: [f64x2; 3] = transmute(vld3q_f64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21365,7 +21365,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_s64() { let a: [i64; 7] = [0, 1, 1, 1, 3, 1, 4]; - let e: [i64x2; 3] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)]; + let e: [i64x2; 3] = [i64x2::new([1, 1]), i64x2::new([1, 1]), i64x2::new([1, 1])]; let r: [i64x2; 3] = transmute(vld3q_dup_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21373,7 +21373,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_u64() { let a: [u64; 7] = [0, 1, 1, 1, 3, 1, 4]; - let e: [u64x2; 3] = [u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1)]; + let e: [u64x2; 3] = [u64x2::new([1, 1]), u64x2::new([1, 1]), u64x2::new([1, 1])]; let r: [u64x2; 3] = transmute(vld3q_dup_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21381,7 +21381,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_p64() { let a: [u64; 7] = [0, 1, 1, 
1, 3, 1, 4]; - let e: [i64x2; 3] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)]; + let e: [i64x2; 3] = [i64x2::new([1, 1]), i64x2::new([1, 1]), i64x2::new([1, 1])]; let r: [i64x2; 3] = transmute(vld3q_dup_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21397,7 +21397,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_f64() { let a: [f64; 7] = [0., 1., 1., 1., 3., 1., 4.]; - let e: [f64x2; 3] = [f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.)]; + let e: [f64x2; 3] = [f64x2::new([1., 1.]), f64x2::new([1., 1.]), f64x2::new([1., 1.])]; let r: [f64x2; 3] = transmute(vld3q_dup_f64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21405,8 +21405,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_s8() { let a: [i8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x16; 3] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)]; - let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)]; + let b: [i8x16; 3] = [i8x16::new([0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8])]; + let e: [i8x16; 3] = [i8x16::new([1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8])]; let r: [i8x16; 3] = transmute(vld3q_lane_s8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21414,8 +21414,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_s64() { let a: [i64; 4] = [0, 1, 2, 2]; - let b: [i64x1; 3] = [i64x1::new(0), i64x1::new(2), i64x1::new(2)]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)]; + let b: [i64x1; 3] = [i64x1::new([0]), i64x1::new([2]), i64x1::new([2])]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2])]; let r: [i64x1; 3] = transmute(vld3_lane_s64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21423,8 +21423,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_s64() { let a: [i64; 7] = [0, 1, 2, 2, 4, 5, 6]; - let b: [i64x2; 3] = [i64x2::new(0, 2), i64x2::new(2, 14), i64x2::new(2, 16)]; - let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 14), i64x2::new(2, 16)]; + let b: [i64x2; 3] = [i64x2::new([0, 2]), i64x2::new([2, 14]), i64x2::new([2, 16])]; + let e: [i64x2; 3] = [i64x2::new([1, 2]), i64x2::new([2, 14]), i64x2::new([2, 16])]; let r: [i64x2; 3] = transmute(vld3q_lane_s64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21432,8 +21432,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_p64() { let a: [u64; 4] = [0, 1, 2, 2]; - let b: [i64x1; 3] = [i64x1::new(0), i64x1::new(2), i64x1::new(2)]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)]; + let b: [i64x1; 3] = [i64x1::new([0]), i64x1::new([2]), i64x1::new([2])]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2])]; let r: [i64x1; 3] = transmute(vld3_lane_p64::<0>(a[1..].as_ptr(), 
transmute(b))); assert_eq!(r, e); } @@ -21441,8 +21441,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_p64() { let a: [u64; 7] = [0, 1, 2, 2, 4, 5, 6]; - let b: [i64x2; 3] = [i64x2::new(0, 2), i64x2::new(2, 14), i64x2::new(2, 16)]; - let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(2, 14), i64x2::new(2, 16)]; + let b: [i64x2; 3] = [i64x2::new([0, 2]), i64x2::new([2, 14]), i64x2::new([2, 16])]; + let e: [i64x2; 3] = [i64x2::new([1, 2]), i64x2::new([2, 14]), i64x2::new([2, 16])]; let r: [i64x2; 3] = transmute(vld3q_lane_p64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21450,8 +21450,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_p8() { let a: [u8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x16; 3] = [i8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)]; - let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)]; + let b: [i8x16; 3] = [i8x16::new([0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8])]; + let e: [i8x16; 3] = [i8x16::new([1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8])]; let r: [i8x16; 3] = transmute(vld3q_lane_p8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21459,8 +21459,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_u8() { let a: [u8; 49] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u8x16; 3] = [u8x16::new(0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)]; - let e: [u8x16; 3] = [u8x16::new(1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8)]; + let b: [u8x16; 3] = [u8x16::new([0, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8])]; + let e: [u8x16; 3] = [u8x16::new([1, 2, 2, 14, 2, 16, 17, 18, 2, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([2, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8])]; let r: [u8x16; 3] = transmute(vld3q_lane_u8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21468,8 +21468,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_u64() { let a: [u64; 4] = [0, 1, 2, 2]; - let b: [u64x1; 3] = [u64x1::new(0), u64x1::new(2), u64x1::new(2)]; - let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(2)]; + let b: [u64x1; 3] = [u64x1::new([0]), u64x1::new([2]), 
u64x1::new([2])]; + let e: [u64x1; 3] = [u64x1::new([1]), u64x1::new([2]), u64x1::new([2])]; let r: [u64x1; 3] = transmute(vld3_lane_u64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21477,8 +21477,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_u64() { let a: [u64; 7] = [0, 1, 2, 2, 4, 5, 6]; - let b: [u64x2; 3] = [u64x2::new(0, 2), u64x2::new(2, 14), u64x2::new(2, 16)]; - let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(2, 14), u64x2::new(2, 16)]; + let b: [u64x2; 3] = [u64x2::new([0, 2]), u64x2::new([2, 14]), u64x2::new([2, 16])]; + let e: [u64x2; 3] = [u64x2::new([1, 2]), u64x2::new([2, 14]), u64x2::new([2, 16])]; let r: [u64x2; 3] = transmute(vld3q_lane_u64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21495,8 +21495,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_f64() { let a: [f64; 7] = [0., 1., 2., 2., 4., 5., 6.]; - let b: [f64x2; 3] = [f64x2::new(0., 2.), f64x2::new(2., 14.), f64x2::new(9., 16.)]; - let e: [f64x2; 3] = [f64x2::new(1., 2.), f64x2::new(2., 14.), f64x2::new(2., 16.)]; + let b: [f64x2; 3] = [f64x2::new([0., 2.]), f64x2::new([2., 14.]), f64x2::new([9., 16.])]; + let e: [f64x2; 3] = [f64x2::new([1., 2.]), f64x2::new([2., 14.]), f64x2::new([2., 16.])]; let r: [f64x2; 3] = transmute(vld3q_lane_f64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21504,7 +21504,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_s64() { let a: [i64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8]; - let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 6), i64x2::new(2, 6), i64x2::new(6, 8)]; + let e: [i64x2; 4] = [i64x2::new([1, 2]), i64x2::new([2, 6]), i64x2::new([2, 6]), i64x2::new([6, 8])]; let r: [i64x2; 4] = transmute(vld4q_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21512,7 +21512,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_u64() { let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8]; - let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(2, 6), u64x2::new(2, 6), u64x2::new(6, 8)]; + let e: [u64x2; 4] = [u64x2::new([1, 2]), u64x2::new([2, 6]), u64x2::new([2, 6]), u64x2::new([6, 8])]; let r: [u64x2; 4] = transmute(vld4q_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21520,7 +21520,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_p64() { let a: [u64; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8]; - let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 6), i64x2::new(2, 6), i64x2::new(6, 8)]; + let e: [i64x2; 4] = [i64x2::new([1, 2]), i64x2::new([2, 6]), i64x2::new([2, 6]), i64x2::new([6, 8])]; let r: [i64x2; 4] = transmute(vld4q_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21536,7 +21536,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_f64() { let a: [f64; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.]; - let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(2., 6.), f64x2::new(2., 6.), f64x2::new(6., 8.)]; + let e: [f64x2; 4] = [f64x2::new([1., 2.]), f64x2::new([2., 6.]), f64x2::new([2., 6.]), f64x2::new([6., 8.])]; let r: [f64x2; 4] = transmute(vld4q_f64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21544,7 +21544,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_s64() { let a: [i64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5]; - let e: [i64x2; 4] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)]; + let e: [i64x2; 4] = [i64x2::new([1, 1]), i64x2::new([1, 1]), i64x2::new([1, 1]), i64x2::new([1, 1])]; let r: [i64x2; 4] = transmute(vld4q_dup_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21552,7 +21552,7 @@ mod test { 
#[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_u64() { let a: [u64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5]; - let e: [u64x2; 4] = [u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1), u64x2::new(1, 1)]; + let e: [u64x2; 4] = [u64x2::new([1, 1]), u64x2::new([1, 1]), u64x2::new([1, 1]), u64x2::new([1, 1])]; let r: [u64x2; 4] = transmute(vld4q_dup_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21560,7 +21560,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_p64() { let a: [u64; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5]; - let e: [i64x2; 4] = [i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1), i64x2::new(1, 1)]; + let e: [i64x2; 4] = [i64x2::new([1, 1]), i64x2::new([1, 1]), i64x2::new([1, 1]), i64x2::new([1, 1])]; let r: [i64x2; 4] = transmute(vld4q_dup_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21576,7 +21576,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_f64() { let a: [f64; 9] = [0., 1., 1., 1., 1., 6., 4., 3., 5.]; - let e: [f64x2; 4] = [f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.), f64x2::new(1., 1.)]; + let e: [f64x2; 4] = [f64x2::new([1., 1.]), f64x2::new([1., 1.]), f64x2::new([1., 1.]), f64x2::new([1., 1.])]; let r: [f64x2; 4] = transmute(vld4q_dup_f64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -21584,8 +21584,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_s8() { let a: [i8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16]; - let b: [i8x16; 4] = [i8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)]; - let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)]; + let b: [i8x16; 4] = [i8x16::new([0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26]), i8x16::new([11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]), i8x16::new([1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16])]; + let e: [i8x16; 4] = [i8x16::new([1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26]), i8x16::new([2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]), i8x16::new([2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16])]; let r: [i8x16; 4] = transmute(vld4q_lane_s8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21593,8 +21593,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_s64() { let a: [i64; 5] = [0, 1, 2, 2, 2]; - let b: [i64x1; 4] = [i64x1::new(0), i64x1::new(2), i64x1::new(2), i64x1::new(2)]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(2)]; + let b: [i64x1; 4] = [i64x1::new([0]), i64x1::new([2]), i64x1::new([2]), i64x1::new([2])]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2]), i64x1::new([2])]; let r: [i64x1; 4] = transmute(vld4_lane_s64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21602,8 +21602,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_s64() { 
let a: [i64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8]; - let b: [i64x2; 4] = [i64x2::new(0, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)]; - let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)]; + let b: [i64x2; 4] = [i64x2::new([0, 2]), i64x2::new([2, 2]), i64x2::new([2, 16]), i64x2::new([2, 18])]; + let e: [i64x2; 4] = [i64x2::new([1, 2]), i64x2::new([2, 2]), i64x2::new([2, 16]), i64x2::new([2, 18])]; let r: [i64x2; 4] = transmute(vld4q_lane_s64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21611,8 +21611,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_p64() { let a: [u64; 5] = [0, 1, 2, 2, 2]; - let b: [i64x1; 4] = [i64x1::new(0), i64x1::new(2), i64x1::new(2), i64x1::new(2)]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(2)]; + let b: [i64x1; 4] = [i64x1::new([0]), i64x1::new([2]), i64x1::new([2]), i64x1::new([2])]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2]), i64x1::new([2])]; let r: [i64x1; 4] = transmute(vld4_lane_p64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21620,8 +21620,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_p64() { let a: [u64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8]; - let b: [i64x2; 4] = [i64x2::new(0, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)]; - let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(2, 2), i64x2::new(2, 16), i64x2::new(2, 18)]; + let b: [i64x2; 4] = [i64x2::new([0, 2]), i64x2::new([2, 2]), i64x2::new([2, 16]), i64x2::new([2, 18])]; + let e: [i64x2; 4] = [i64x2::new([1, 2]), i64x2::new([2, 2]), i64x2::new([2, 16]), i64x2::new([2, 18])]; let r: [i64x2; 4] = transmute(vld4q_lane_p64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21629,8 +21629,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_p8() { let a: [u8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16]; - let b: [i8x16; 4] = [i8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)]; - let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), i8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), i8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), i8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)]; + let b: [i8x16; 4] = [i8x16::new([0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26]), i8x16::new([11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]), i8x16::new([1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16])]; + let e: [i8x16; 4] = [i8x16::new([1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26]), i8x16::new([2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26]), i8x16::new([2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]), i8x16::new([2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16])]; let r: [i8x16; 4] = transmute(vld4q_lane_p8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21638,8 +21638,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_u8() { let a: [u8; 65] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 
6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16]; - let b: [u8x16; 4] = [u8x16::new(0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), u8x16::new(11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), u8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)]; - let e: [u8x16; 4] = [u8x16::new(1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26), u8x16::new(2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26), u8x16::new(2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8), u8x16::new(2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16)]; + let b: [u8x16; 4] = [u8x16::new([0, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26]), u8x16::new([11, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]), u8x16::new([1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16])]; + let e: [u8x16; 4] = [u8x16::new([1, 2, 2, 2, 2, 16, 2, 18, 2, 20, 21, 22, 2, 24, 25, 26]), u8x16::new([2, 12, 13, 14, 15, 16, 2, 18, 2, 20, 21, 22, 23, 24, 25, 26]), u8x16::new([2, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]), u8x16::new([2, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16])]; let r: [u8x16; 4] = transmute(vld4q_lane_u8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21647,8 +21647,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_u64() { let a: [u64; 5] = [0, 1, 2, 2, 2]; - let b: [u64x1; 4] = [u64x1::new(0), u64x1::new(2), u64x1::new(2), u64x1::new(2)]; - let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(2), u64x1::new(2)]; + let b: [u64x1; 4] = [u64x1::new([0]), u64x1::new([2]), u64x1::new([2]), u64x1::new([2])]; + let e: [u64x1; 4] = [u64x1::new([1]), u64x1::new([2]), u64x1::new([2]), u64x1::new([2])]; let r: [u64x1; 4] = transmute(vld4_lane_u64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21656,8 +21656,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_u64() { let a: [u64; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8]; - let b: [u64x2; 4] = [u64x2::new(0, 2), u64x2::new(2, 2), u64x2::new(2, 16), u64x2::new(2, 18)]; - let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(2, 2), u64x2::new(2, 16), u64x2::new(2, 18)]; + let b: [u64x2; 4] = [u64x2::new([0, 2]), u64x2::new([2, 2]), u64x2::new([2, 16]), u64x2::new([2, 18])]; + let e: [u64x2; 4] = [u64x2::new([1, 2]), u64x2::new([2, 2]), u64x2::new([2, 16]), u64x2::new([2, 18])]; let r: [u64x2; 4] = transmute(vld4q_lane_u64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -21674,8 +21674,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_f64() { let a: [f64; 9] = [0., 1., 2., 2., 2., 5., 6., 7., 8.]; - let b: [f64x2; 4] = [f64x2::new(0., 2.), f64x2::new(2., 2.), f64x2::new(2., 16.), f64x2::new(2., 18.)]; - let e: [f64x2; 4] = [f64x2::new(1., 2.), f64x2::new(2., 2.), f64x2::new(2., 16.), f64x2::new(2., 18.)]; + let b: [f64x2; 4] = [f64x2::new([0., 2.]), f64x2::new([2., 2.]), f64x2::new([2., 16.]), f64x2::new([2., 18.])]; + let e: [f64x2; 4] = [f64x2::new([1., 2.]), f64x2::new([2., 2.]), f64x2::new([2., 16.]), f64x2::new([2., 18.])]; let r: [f64x2; 4] = transmute(vld4q_lane_f64::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } diff --git a/crates/core_arch/src/arm_shared/neon/generated.rs b/crates/core_arch/src/arm_shared/neon/generated.rs index 775811e657..6663895063 100644 --- 
a/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/crates/core_arch/src/arm_shared/neon/generated.rs @@ -1532,7 +1532,7 @@ pub unsafe fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8) #[inline] @@ -1545,7 +1545,7 @@ pub unsafe fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8) #[inline] @@ -1558,7 +1558,7 @@ pub unsafe fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16) #[inline] @@ -1571,7 +1571,7 @@ pub unsafe fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16) #[inline] @@ -1584,7 +1584,7 @@ pub unsafe fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32) #[inline] @@ -1597,7 +1597,7 @@ pub unsafe fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { simd_gt(a, b) } -/// Compare unsigned highe +/// Compare unsigned greater than /// /// [Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32) #[inline] @@ -34001,7 +34001,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s8_x2() { let a: [i8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i8x8; 2] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [i8x8; 2] = [i8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i8x8::new([9, 10, 11, 12, 13, 14, 15, 16])]; let r: [i8x8; 2] = transmute(vld1_s8_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34009,7 +34009,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s16_x2() { let a: [i16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [i16x4; 2] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8)]; + let e: [i16x4; 2] = [i16x4::new([1, 2, 3, 4]), i16x4::new([5, 6, 7, 8])]; let r: [i16x4; 2] = transmute(vld1_s16_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34017,7 +34017,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s32_x2() { let a: [i32; 5] = [0, 1, 2, 3, 4]; - let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(3, 4)]; + let e: [i32x2; 2] = [i32x2::new([1, 2]), i32x2::new([3, 4])]; let r: [i32x2; 2] = transmute(vld1_s32_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34025,7 +34025,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s64_x2() { let a: [i64; 3] = [0, 1, 2]; - let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)]; + let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([2])]; let r: [i64x1; 2] = transmute(vld1_s64_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34033,7 +34033,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s8_x2() { let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - 
let e: [i8x16; 2] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i8x16; 2] = [i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i8x16; 2] = transmute(vld1q_s8_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34041,7 +34041,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s16_x2() { let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i16x8; 2] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [i16x8; 2] = [i16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i16x8::new([9, 10, 11, 12, 13, 14, 15, 16])]; let r: [i16x8; 2] = transmute(vld1q_s16_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34049,7 +34049,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s32_x2() { let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [i32x4; 2] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8)]; + let e: [i32x4; 2] = [i32x4::new([1, 2, 3, 4]), i32x4::new([5, 6, 7, 8])]; let r: [i32x4; 2] = transmute(vld1q_s32_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34057,7 +34057,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s64_x2() { let a: [i64; 5] = [0, 1, 2, 3, 4]; - let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(3, 4)]; + let e: [i64x2; 2] = [i64x2::new([1, 2]), i64x2::new([3, 4])]; let r: [i64x2; 2] = transmute(vld1q_s64_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34065,7 +34065,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s8_x3() { let a: [i8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; - let e: [i8x8; 3] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let e: [i8x8; 3] = [i8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i8x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i8x8::new([17, 18, 19, 20, 21, 22, 23, 24])]; let r: [i8x8; 3] = transmute(vld1_s8_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34073,7 +34073,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s16_x3() { let a: [i16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - let e: [i16x4; 3] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12)]; + let e: [i16x4; 3] = [i16x4::new([1, 2, 3, 4]), i16x4::new([5, 6, 7, 8]), i16x4::new([9, 10, 11, 12])]; let r: [i16x4; 3] = transmute(vld1_s16_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34081,7 +34081,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s32_x3() { let a: [i32; 7] = [0, 1, 2, 3, 4, 5, 6]; - let e: [i32x2; 3] = [i32x2::new(1, 2), i32x2::new(3, 4), i32x2::new(5, 6)]; + let e: [i32x2; 3] = [i32x2::new([1, 2]), i32x2::new([3, 4]), i32x2::new([5, 6])]; let r: [i32x2; 3] = transmute(vld1_s32_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34089,7 +34089,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s64_x3() { let a: [i64; 4] = [0, 1, 2, 3]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(3)]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([3])]; let r: [i64x1; 3] = transmute(vld1_s64_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34097,7 +34097,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s8_x3() { let a: [i8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 
25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i8x16; 3] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [i8x16; 3] = [i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])]; let r: [i8x16; 3] = transmute(vld1q_s8_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34105,7 +34105,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s16_x3() { let a: [i16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; - let e: [i16x8; 3] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let e: [i16x8; 3] = [i16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i16x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i16x8::new([17, 18, 19, 20, 21, 22, 23, 24])]; let r: [i16x8; 3] = transmute(vld1q_s16_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34113,7 +34113,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s32_x3() { let a: [i32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - let e: [i32x4; 3] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8), i32x4::new(9, 10, 11, 12)]; + let e: [i32x4; 3] = [i32x4::new([1, 2, 3, 4]), i32x4::new([5, 6, 7, 8]), i32x4::new([9, 10, 11, 12])]; let r: [i32x4; 3] = transmute(vld1q_s32_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34121,7 +34121,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s64_x3() { let a: [i64; 7] = [0, 1, 2, 3, 4, 5, 6]; - let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6)]; + let e: [i64x2; 3] = [i64x2::new([1, 2]), i64x2::new([3, 4]), i64x2::new([5, 6])]; let r: [i64x2; 3] = transmute(vld1q_s64_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34129,7 +34129,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s8_x4() { let a: [i8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [i8x8; 4] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24), i8x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i8x8; 4] = [i8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i8x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i8x8::new([17, 18, 19, 20, 21, 22, 23, 24]), i8x8::new([25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i8x8; 4] = transmute(vld1_s8_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34137,7 +34137,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s16_x4() { let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i16x4; 4] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12), i16x4::new(13, 14, 15, 16)]; + let e: [i16x4; 4] = [i16x4::new([1, 2, 3, 4]), i16x4::new([5, 6, 7, 8]), i16x4::new([9, 10, 11, 12]), i16x4::new([13, 14, 15, 16])]; let r: [i16x4; 4] = transmute(vld1_s16_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34145,7 +34145,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s32_x4() { let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [i32x2; 4] = [i32x2::new(1, 2), i32x2::new(3, 4), i32x2::new(5, 6), i32x2::new(7, 8)]; + let e: [i32x2; 4] = [i32x2::new([1, 
2]), i32x2::new([3, 4]), i32x2::new([5, 6]), i32x2::new([7, 8])]; let r: [i32x2; 4] = transmute(vld1_s32_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34153,7 +34153,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_s64_x4() { let a: [i64; 5] = [0, 1, 2, 3, 4]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(3), i64x1::new(4)]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([3]), i64x1::new([4])]; let r: [i64x1; 4] = transmute(vld1_s64_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34161,7 +34161,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s8_x4() { let a: [i8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [i8x16; 4] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i8x16; 4] = [i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i8x16; 4] = transmute(vld1q_s8_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34169,7 +34169,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s16_x4() { let a: [i16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [i16x8; 4] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24), i16x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i16x8; 4] = [i16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i16x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i16x8::new([17, 18, 19, 20, 21, 22, 23, 24]), i16x8::new([25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i16x8; 4] = transmute(vld1q_s16_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34177,7 +34177,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s32_x4() { let a: [i32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i32x4; 4] = [i32x4::new(1, 2, 3, 4), i32x4::new(5, 6, 7, 8), i32x4::new(9, 10, 11, 12), i32x4::new(13, 14, 15, 16)]; + let e: [i32x4; 4] = [i32x4::new([1, 2, 3, 4]), i32x4::new([5, 6, 7, 8]), i32x4::new([9, 10, 11, 12]), i32x4::new([13, 14, 15, 16])]; let r: [i32x4; 4] = transmute(vld1q_s32_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34185,7 +34185,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_s64_x4() { let a: [i64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6), i64x2::new(7, 8)]; + let e: [i64x2; 4] = [i64x2::new([1, 2]), i64x2::new([3, 4]), i64x2::new([5, 6]), i64x2::new([7, 8])]; let r: [i64x2; 4] = transmute(vld1q_s64_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34193,7 +34193,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u8_x2() { let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [u8x8; 2] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [u8x8; 2] = 
[u8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), u8x8::new([9, 10, 11, 12, 13, 14, 15, 16])]; let r: [u8x8; 2] = transmute(vld1_u8_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34201,7 +34201,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u16_x2() { let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [u16x4; 2] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8)]; + let e: [u16x4; 2] = [u16x4::new([1, 2, 3, 4]), u16x4::new([5, 6, 7, 8])]; let r: [u16x4; 2] = transmute(vld1_u16_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34209,7 +34209,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u32_x2() { let a: [u32; 5] = [0, 1, 2, 3, 4]; - let e: [u32x2; 2] = [u32x2::new(1, 2), u32x2::new(3, 4)]; + let e: [u32x2; 2] = [u32x2::new([1, 2]), u32x2::new([3, 4])]; let r: [u32x2; 2] = transmute(vld1_u32_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34217,7 +34217,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u64_x2() { let a: [u64; 3] = [0, 1, 2]; - let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)]; + let e: [u64x1; 2] = [u64x1::new([1]), u64x1::new([2])]; let r: [u64x1; 2] = transmute(vld1_u64_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34225,7 +34225,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u8_x2() { let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [u8x16; 2] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [u8x16; 2] = [u8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), u8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32])]; let r: [u8x16; 2] = transmute(vld1q_u8_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34233,7 +34233,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u16_x2() { let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [u16x8; 2] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [u16x8; 2] = [u16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), u16x8::new([9, 10, 11, 12, 13, 14, 15, 16])]; let r: [u16x8; 2] = transmute(vld1q_u16_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34241,7 +34241,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u32_x2() { let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [u32x4; 2] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8)]; + let e: [u32x4; 2] = [u32x4::new([1, 2, 3, 4]), u32x4::new([5, 6, 7, 8])]; let r: [u32x4; 2] = transmute(vld1q_u32_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34249,7 +34249,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u64_x2() { let a: [u64; 5] = [0, 1, 2, 3, 4]; - let e: [u64x2; 2] = [u64x2::new(1, 2), u64x2::new(3, 4)]; + let e: [u64x2; 2] = [u64x2::new([1, 2]), u64x2::new([3, 4])]; let r: [u64x2; 2] = transmute(vld1q_u64_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34257,7 +34257,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u8_x3() { let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; - let e: [u8x8; 3] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16), u8x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let e: [u8x8; 3] = [u8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), u8x8::new([9, 10, 11, 12, 13, 14, 15, 16]), u8x8::new([17, 18, 19, 20, 21, 22, 23, 24])]; let r: [u8x8; 3] = 
transmute(vld1_u8_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34265,7 +34265,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u16_x3() { let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - let e: [u16x4; 3] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8), u16x4::new(9, 10, 11, 12)]; + let e: [u16x4; 3] = [u16x4::new([1, 2, 3, 4]), u16x4::new([5, 6, 7, 8]), u16x4::new([9, 10, 11, 12])]; let r: [u16x4; 3] = transmute(vld1_u16_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34273,7 +34273,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u32_x3() { let a: [u32; 7] = [0, 1, 2, 3, 4, 5, 6]; - let e: [u32x2; 3] = [u32x2::new(1, 2), u32x2::new(3, 4), u32x2::new(5, 6)]; + let e: [u32x2; 3] = [u32x2::new([1, 2]), u32x2::new([3, 4]), u32x2::new([5, 6])]; let r: [u32x2; 3] = transmute(vld1_u32_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34281,7 +34281,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u64_x3() { let a: [u64; 4] = [0, 1, 2, 3]; - let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(3)]; + let e: [u64x1; 3] = [u64x1::new([1]), u64x1::new([2]), u64x1::new([3])]; let r: [u64x1; 3] = transmute(vld1_u64_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34289,7 +34289,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u8_x3() { let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [u8x16; 3] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [u8x16; 3] = [u8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), u8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), u8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])]; let r: [u8x16; 3] = transmute(vld1q_u8_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34297,7 +34297,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u16_x3() { let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; - let e: [u16x8; 3] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16), u16x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let e: [u16x8; 3] = [u16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), u16x8::new([9, 10, 11, 12, 13, 14, 15, 16]), u16x8::new([17, 18, 19, 20, 21, 22, 23, 24])]; let r: [u16x8; 3] = transmute(vld1q_u16_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34305,7 +34305,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u32_x3() { let a: [u32; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - let e: [u32x4; 3] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8), u32x4::new(9, 10, 11, 12)]; + let e: [u32x4; 3] = [u32x4::new([1, 2, 3, 4]), u32x4::new([5, 6, 7, 8]), u32x4::new([9, 10, 11, 12])]; let r: [u32x4; 3] = transmute(vld1q_u32_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34313,7 +34313,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u64_x3() { let a: [u64; 7] = [0, 1, 2, 3, 4, 5, 6]; - let e: [u64x2; 3] = [u64x2::new(1, 2), u64x2::new(3, 4), u64x2::new(5, 6)]; + let e: [u64x2; 3] = [u64x2::new([1, 2]), u64x2::new([3, 4]), u64x2::new([5, 6])]; let r: [u64x2; 3] = transmute(vld1q_u64_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34321,7 +34321,7 @@ mod test { 
#[simd_test(enable = "neon")] unsafe fn test_vld1_u8_x4() { let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [u8x8; 4] = [u8x8::new(1, 2, 3, 4, 5, 6, 7, 8), u8x8::new(9, 10, 11, 12, 13, 14, 15, 16), u8x8::new(17, 18, 19, 20, 21, 22, 23, 24), u8x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [u8x8; 4] = [u8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), u8x8::new([9, 10, 11, 12, 13, 14, 15, 16]), u8x8::new([17, 18, 19, 20, 21, 22, 23, 24]), u8x8::new([25, 26, 27, 28, 29, 30, 31, 32])]; let r: [u8x8; 4] = transmute(vld1_u8_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34329,7 +34329,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u16_x4() { let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [u16x4; 4] = [u16x4::new(1, 2, 3, 4), u16x4::new(5, 6, 7, 8), u16x4::new(9, 10, 11, 12), u16x4::new(13, 14, 15, 16)]; + let e: [u16x4; 4] = [u16x4::new([1, 2, 3, 4]), u16x4::new([5, 6, 7, 8]), u16x4::new([9, 10, 11, 12]), u16x4::new([13, 14, 15, 16])]; let r: [u16x4; 4] = transmute(vld1_u16_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34337,7 +34337,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u32_x4() { let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [u32x2; 4] = [u32x2::new(1, 2), u32x2::new(3, 4), u32x2::new(5, 6), u32x2::new(7, 8)]; + let e: [u32x2; 4] = [u32x2::new([1, 2]), u32x2::new([3, 4]), u32x2::new([5, 6]), u32x2::new([7, 8])]; let r: [u32x2; 4] = transmute(vld1_u32_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34345,7 +34345,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_u64_x4() { let a: [u64; 5] = [0, 1, 2, 3, 4]; - let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(3), u64x1::new(4)]; + let e: [u64x1; 4] = [u64x1::new([1]), u64x1::new([2]), u64x1::new([3]), u64x1::new([4])]; let r: [u64x1; 4] = transmute(vld1_u64_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34353,7 +34353,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u8_x4() { let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [u8x16; 4] = [u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), u8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [u8x16; 4] = [u8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), u8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), u8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), u8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32])]; let r: [u8x16; 4] = transmute(vld1q_u8_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34361,7 +34361,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u16_x4() { let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [u16x8; 4] = [u16x8::new(1, 2, 3, 4, 5, 6, 7, 8), u16x8::new(9, 10, 11, 12, 13, 14, 15, 16), u16x8::new(17, 18, 19, 20, 21, 22, 23, 24), u16x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [u16x8; 4] = [u16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), 
u16x8::new([9, 10, 11, 12, 13, 14, 15, 16]), u16x8::new([17, 18, 19, 20, 21, 22, 23, 24]), u16x8::new([25, 26, 27, 28, 29, 30, 31, 32])]; let r: [u16x8; 4] = transmute(vld1q_u16_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34369,7 +34369,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u32_x4() { let a: [u32; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [u32x4; 4] = [u32x4::new(1, 2, 3, 4), u32x4::new(5, 6, 7, 8), u32x4::new(9, 10, 11, 12), u32x4::new(13, 14, 15, 16)]; + let e: [u32x4; 4] = [u32x4::new([1, 2, 3, 4]), u32x4::new([5, 6, 7, 8]), u32x4::new([9, 10, 11, 12]), u32x4::new([13, 14, 15, 16])]; let r: [u32x4; 4] = transmute(vld1q_u32_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34377,7 +34377,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_u64_x4() { let a: [u64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [u64x2; 4] = [u64x2::new(1, 2), u64x2::new(3, 4), u64x2::new(5, 6), u64x2::new(7, 8)]; + let e: [u64x2; 4] = [u64x2::new([1, 2]), u64x2::new([3, 4]), u64x2::new([5, 6]), u64x2::new([7, 8])]; let r: [u64x2; 4] = transmute(vld1q_u64_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34385,7 +34385,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p8_x2() { let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i8x8; 2] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [i8x8; 2] = [i8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i8x8::new([9, 10, 11, 12, 13, 14, 15, 16])]; let r: [i8x8; 2] = transmute(vld1_p8_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34393,7 +34393,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p8_x3() { let a: [u8; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; - let e: [i8x8; 3] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let e: [i8x8; 3] = [i8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i8x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i8x8::new([17, 18, 19, 20, 21, 22, 23, 24])]; let r: [i8x8; 3] = transmute(vld1_p8_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34401,7 +34401,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p8_x4() { let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [i8x8; 4] = [i8x8::new(1, 2, 3, 4, 5, 6, 7, 8), i8x8::new(9, 10, 11, 12, 13, 14, 15, 16), i8x8::new(17, 18, 19, 20, 21, 22, 23, 24), i8x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i8x8; 4] = [i8x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i8x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i8x8::new([17, 18, 19, 20, 21, 22, 23, 24]), i8x8::new([25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i8x8; 4] = transmute(vld1_p8_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34409,7 +34409,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p8_x2() { let a: [u8; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [i8x16; 2] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i8x16; 2] = [i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i8x16; 2] = transmute(vld1q_p8_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ 
-34417,7 +34417,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p8_x3() { let a: [u8; 49] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i8x16; 3] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [i8x16; 3] = [i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16])]; let r: [i8x16; 3] = transmute(vld1q_p8_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34425,7 +34425,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p8_x4() { let a: [u8; 65] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [i8x16; 4] = [i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16), i8x16::new(17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i8x16; 4] = [i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]), i8x16::new([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), i8x16::new([17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i8x16; 4] = transmute(vld1q_p8_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34433,7 +34433,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p16_x2() { let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [i16x4; 2] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8)]; + let e: [i16x4; 2] = [i16x4::new([1, 2, 3, 4]), i16x4::new([5, 6, 7, 8])]; let r: [i16x4; 2] = transmute(vld1_p16_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34441,7 +34441,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p16_x3() { let a: [u16; 13] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; - let e: [i16x4; 3] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12)]; + let e: [i16x4; 3] = [i16x4::new([1, 2, 3, 4]), i16x4::new([5, 6, 7, 8]), i16x4::new([9, 10, 11, 12])]; let r: [i16x4; 3] = transmute(vld1_p16_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34449,7 +34449,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p16_x4() { let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i16x4; 4] = [i16x4::new(1, 2, 3, 4), i16x4::new(5, 6, 7, 8), i16x4::new(9, 10, 11, 12), i16x4::new(13, 14, 15, 16)]; + let e: [i16x4; 4] = [i16x4::new([1, 2, 3, 4]), i16x4::new([5, 6, 7, 8]), i16x4::new([9, 10, 11, 12]), i16x4::new([13, 14, 15, 16])]; let r: [i16x4; 4] = transmute(vld1_p16_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34457,7 +34457,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p16_x2() { let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; - let e: [i16x8; 2] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16)]; + let e: [i16x8; 2] = 
[i16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i16x8::new([9, 10, 11, 12, 13, 14, 15, 16])]; let r: [i16x8; 2] = transmute(vld1q_p16_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34465,7 +34465,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p16_x3() { let a: [u16; 25] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]; - let e: [i16x8; 3] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24)]; + let e: [i16x8; 3] = [i16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i16x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i16x8::new([17, 18, 19, 20, 21, 22, 23, 24])]; let r: [i16x8; 3] = transmute(vld1q_p16_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34473,7 +34473,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p16_x4() { let a: [u16; 33] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]; - let e: [i16x8; 4] = [i16x8::new(1, 2, 3, 4, 5, 6, 7, 8), i16x8::new(9, 10, 11, 12, 13, 14, 15, 16), i16x8::new(17, 18, 19, 20, 21, 22, 23, 24), i16x8::new(25, 26, 27, 28, 29, 30, 31, 32)]; + let e: [i16x8; 4] = [i16x8::new([1, 2, 3, 4, 5, 6, 7, 8]), i16x8::new([9, 10, 11, 12, 13, 14, 15, 16]), i16x8::new([17, 18, 19, 20, 21, 22, 23, 24]), i16x8::new([25, 26, 27, 28, 29, 30, 31, 32])]; let r: [i16x8; 4] = transmute(vld1q_p16_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34481,7 +34481,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p64_x2() { let a: [u64; 3] = [0, 1, 2]; - let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)]; + let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([2])]; let r: [i64x1; 2] = transmute(vld1_p64_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34489,7 +34489,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p64_x3() { let a: [u64; 4] = [0, 1, 2, 3]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(3)]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([3])]; let r: [i64x1; 3] = transmute(vld1_p64_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34497,7 +34497,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_p64_x4() { let a: [u64; 5] = [0, 1, 2, 3, 4]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(3), i64x1::new(4)]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([3]), i64x1::new([4])]; let r: [i64x1; 4] = transmute(vld1_p64_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34505,7 +34505,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p64_x2() { let a: [u64; 5] = [0, 1, 2, 3, 4]; - let e: [i64x2; 2] = [i64x2::new(1, 2), i64x2::new(3, 4)]; + let e: [i64x2; 2] = [i64x2::new([1, 2]), i64x2::new([3, 4])]; let r: [i64x2; 2] = transmute(vld1q_p64_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34513,7 +34513,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p64_x3() { let a: [u64; 7] = [0, 1, 2, 3, 4, 5, 6]; - let e: [i64x2; 3] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6)]; + let e: [i64x2; 3] = [i64x2::new([1, 2]), i64x2::new([3, 4]), i64x2::new([5, 6])]; let r: [i64x2; 3] = transmute(vld1q_p64_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34521,7 +34521,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_p64_x4() { let a: [u64; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let e: [i64x2; 4] = [i64x2::new(1, 2), i64x2::new(3, 4), i64x2::new(5, 6), i64x2::new(7, 8)]; + let e: [i64x2; 4] = [i64x2::new([1, 2]), i64x2::new([3, 4]), 
i64x2::new([5, 6]), i64x2::new([7, 8])]; let r: [i64x2; 4] = transmute(vld1q_p64_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34529,7 +34529,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_f32_x2() { let a: [f32; 5] = [0., 1., 2., 3., 4.]; - let e: [f32x2; 2] = [f32x2::new(1., 2.), f32x2::new(3., 4.)]; + let e: [f32x2; 2] = [f32x2::new([1., 2.]), f32x2::new([3., 4.])]; let r: [f32x2; 2] = transmute(vld1_f32_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34537,7 +34537,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_f32_x2() { let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; - let e: [f32x4; 2] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.)]; + let e: [f32x4; 2] = [f32x4::new([1., 2., 3., 4.]), f32x4::new([5., 6., 7., 8.])]; let r: [f32x4; 2] = transmute(vld1q_f32_x2(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34545,7 +34545,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_f32_x3() { let a: [f32; 7] = [0., 1., 2., 3., 4., 5., 6.]; - let e: [f32x2; 3] = [f32x2::new(1., 2.), f32x2::new(3., 4.), f32x2::new(5., 6.)]; + let e: [f32x2; 3] = [f32x2::new([1., 2.]), f32x2::new([3., 4.]), f32x2::new([5., 6.])]; let r: [f32x2; 3] = transmute(vld1_f32_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34553,7 +34553,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_f32_x3() { let a: [f32; 13] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12.]; - let e: [f32x4; 3] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.), f32x4::new(9., 10., 11., 12.)]; + let e: [f32x4; 3] = [f32x4::new([1., 2., 3., 4.]), f32x4::new([5., 6., 7., 8.]), f32x4::new([9., 10., 11., 12.])]; let r: [f32x4; 3] = transmute(vld1q_f32_x3(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34561,7 +34561,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1_f32_x4() { let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; - let e: [f32x2; 4] = [f32x2::new(1., 2.), f32x2::new(3., 4.), f32x2::new(5., 6.), f32x2::new(7., 8.)]; + let e: [f32x2; 4] = [f32x2::new([1., 2.]), f32x2::new([3., 4.]), f32x2::new([5., 6.]), f32x2::new([7., 8.])]; let r: [f32x2; 4] = transmute(vld1_f32_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34569,7 +34569,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld1q_f32_x4() { let a: [f32; 17] = [0., 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.]; - let e: [f32x4; 4] = [f32x4::new(1., 2., 3., 4.), f32x4::new(5., 6., 7., 8.), f32x4::new(9., 10., 11., 12.), f32x4::new(13., 14., 15., 16.)]; + let e: [f32x4; 4] = [f32x4::new([1., 2., 3., 4.]), f32x4::new([5., 6., 7., 8.]), f32x4::new([9., 10., 11., 12.]), f32x4::new([13., 14., 15., 16.])]; let r: [f32x4; 4] = transmute(vld1q_f32_x4(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34577,7 +34577,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_s8() { let a: [i8; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 3, 2, 3, 4, 5), i8x8::new(2, 3, 4, 5, 6, 7, 8, 9)]; + let e: [i8x8; 2] = [i8x8::new([1, 2, 2, 3, 2, 3, 4, 5]), i8x8::new([2, 3, 4, 5, 6, 7, 8, 9])]; let r: [i8x8; 2] = transmute(vld2_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34585,7 +34585,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_s16() { let a: [i16; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5]; - let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 3), i16x4::new(2, 3, 4, 5)]; + let e: [i16x4; 2] = [i16x4::new([1, 2, 2, 3]), i16x4::new([2, 3, 4, 5])]; let r: [i16x4; 2] = transmute(vld2_s16(a[1..].as_ptr())); 
         assert_eq!(r, e);
     }
@@ -34593,7 +34593,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2_s32() {
         let a: [i32; 5] = [0, 1, 2, 2, 3];
-        let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(2, 3)];
+        let e: [i32x2; 2] = [i32x2::new([1, 2]), i32x2::new([2, 3])];
         let r: [i32x2; 2] = transmute(vld2_s32(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34601,7 +34601,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2q_s8() {
         let a: [i8; 33] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
-        let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9), i8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)];
+        let e: [i8x16; 2] = [i8x16::new([1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9]), i8x16::new([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17])];
         let r: [i8x16; 2] = transmute(vld2q_s8(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34609,7 +34609,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2q_s16() {
         let a: [i16; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
-        let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 3, 2, 3, 4, 5), i16x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
+        let e: [i16x8; 2] = [i16x8::new([1, 2, 2, 3, 2, 3, 4, 5]), i16x8::new([2, 3, 4, 5, 6, 7, 8, 9])];
         let r: [i16x8; 2] = transmute(vld2q_s16(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34617,7 +34617,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2q_s32() {
         let a: [i32; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5];
-        let e: [i32x4; 2] = [i32x4::new(1, 2, 2, 3), i32x4::new(2, 3, 4, 5)];
+        let e: [i32x4; 2] = [i32x4::new([1, 2, 2, 3]), i32x4::new([2, 3, 4, 5])];
         let r: [i32x4; 2] = transmute(vld2q_s32(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34625,7 +34625,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2_s64() {
         let a: [i64; 3] = [0, 1, 2];
-        let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)];
+        let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([2])];
         let r: [i64x1; 2] = transmute(vld2_s64(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34633,7 +34633,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2_u8() {
         let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9];
-        let e: [u8x8; 2] = [u8x8::new(1, 2, 2, 3, 2, 3, 4, 5), u8x8::new(2, 3, 4, 5, 6, 7, 8, 9)];
+        let e: [u8x8; 2] = [u8x8::new([1, 2, 2, 3, 2, 3, 4, 5]), u8x8::new([2, 3, 4, 5, 6, 7, 8, 9])];
         let r: [u8x8; 2] = transmute(vld2_u8(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34641,7 +34641,7 @@ mod test {
    #[simd_test(enable = "neon")]
     unsafe fn test_vld2_u16() {
         let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5];
-        let e: [u16x4; 2] = [u16x4::new(1, 2, 2, 3), u16x4::new(2, 3, 4, 5)];
+        let e: [u16x4; 2] = [u16x4::new([1, 2, 2, 3]), u16x4::new([2, 3, 4, 5])];
         let r: [u16x4; 2] = transmute(vld2_u16(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34649,7 +34649,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2_u32() {
         let a: [u32; 5] = [0, 1, 2, 2, 3];
-        let e: [u32x2; 2] = [u32x2::new(1, 2), u32x2::new(2, 3)];
+        let e: [u32x2; 2] = [u32x2::new([1, 2]), u32x2::new([2, 3])];
         let r: [u32x2; 2] = transmute(vld2_u32(a[1..].as_ptr()));
         assert_eq!(r, e);
     }
@@ -34657,7 +34657,7 @@ mod test {
     #[simd_test(enable = "neon")]
     unsafe fn test_vld2q_u8() {
         let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17];
-        let e: [u8x16; 2] = [u8x16::new(1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9), u8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15, 16, 17)]; + let e: [u8x16; 2] = [u8x16::new([1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9]), u8x16::new([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17])]; let r: [u8x16; 2] = transmute(vld2q_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34665,7 +34665,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_u16() { let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9]; - let e: [u16x8; 2] = [u16x8::new(1, 2, 2, 3, 2, 3, 4, 5), u16x8::new(2, 3, 4, 5, 6, 7, 8, 9)]; + let e: [u16x8; 2] = [u16x8::new([1, 2, 2, 3, 2, 3, 4, 5]), u16x8::new([2, 3, 4, 5, 6, 7, 8, 9])]; let r: [u16x8; 2] = transmute(vld2q_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34673,7 +34673,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_u32() { let a: [u32; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5]; - let e: [u32x4; 2] = [u32x4::new(1, 2, 2, 3), u32x4::new(2, 3, 4, 5)]; + let e: [u32x4; 2] = [u32x4::new([1, 2, 2, 3]), u32x4::new([2, 3, 4, 5])]; let r: [u32x4; 2] = transmute(vld2q_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34681,7 +34681,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_p8() { let a: [u8; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 3, 2, 3, 4, 5), i8x8::new(2, 3, 4, 5, 6, 7, 8, 9)]; + let e: [i8x8; 2] = [i8x8::new([1, 2, 2, 3, 2, 3, 4, 5]), i8x8::new([2, 3, 4, 5, 6, 7, 8, 9])]; let r: [i8x8; 2] = transmute(vld2_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34689,7 +34689,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_p16() { let a: [u16; 9] = [0, 1, 2, 2, 3, 2, 4, 3, 5]; - let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 3), i16x4::new(2, 3, 4, 5)]; + let e: [i16x4; 2] = [i16x4::new([1, 2, 2, 3]), i16x4::new([2, 3, 4, 5])]; let r: [i16x4; 2] = transmute(vld2_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34697,7 +34697,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_p8() { let a: [u8; 33] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17]; - let e: [i8x16; 2] = [i8x16::new(1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9), i8x16::new(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)]; + let e: [i8x16; 2] = [i8x16::new([1, 2, 2, 3, 2, 3, 4, 5, 2, 3, 4, 5, 6, 7, 8, 9]), i8x16::new([2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17])]; let r: [i8x16; 2] = transmute(vld2q_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34705,7 +34705,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_p16() { let a: [u16; 17] = [0, 1, 2, 2, 3, 2, 4, 3, 5, 2, 6, 3, 7, 4, 8, 5, 9]; - let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 3, 2, 3, 4, 5), i16x8::new(2, 3, 4, 5, 6, 7, 8, 9)]; + let e: [i16x8; 2] = [i16x8::new([1, 2, 2, 3, 2, 3, 4, 5]), i16x8::new([2, 3, 4, 5, 6, 7, 8, 9])]; let r: [i16x8; 2] = transmute(vld2q_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34713,7 +34713,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_u64() { let a: [u64; 3] = [0, 1, 2]; - let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(2)]; + let e: [u64x1; 2] = [u64x1::new([1]), u64x1::new([2])]; let r: [u64x1; 2] = transmute(vld2_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34721,7 +34721,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_p64() { let a: [u64; 3] = [0, 1, 2]; - let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(2)]; + let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([2])]; let r: [i64x1; 2] = transmute(vld2_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34729,7 +34729,7 @@ 
mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_f32() { let a: [f32; 5] = [0., 1., 2., 2., 3.]; - let e: [f32x2; 2] = [f32x2::new(1., 2.), f32x2::new(2., 3.)]; + let e: [f32x2; 2] = [f32x2::new([1., 2.]), f32x2::new([2., 3.])]; let r: [f32x2; 2] = transmute(vld2_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34737,7 +34737,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_f32() { let a: [f32; 9] = [0., 1., 2., 2., 3., 2., 4., 3., 5.]; - let e: [f32x4; 2] = [f32x4::new(1., 2., 2., 3.), f32x4::new(2., 3., 4., 5.)]; + let e: [f32x4; 2] = [f32x4::new([1., 2., 2., 3.]), f32x4::new([2., 3., 4., 5.])]; let r: [f32x4; 2] = transmute(vld2q_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34745,7 +34745,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_s8() { let a: [i8; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x8; 2] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x8; 2] = [i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x8; 2] = transmute(vld2_dup_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34753,7 +34753,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_s16() { let a: [i16; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5]; - let e: [i16x4; 2] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)]; + let e: [i16x4; 2] = [i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1])]; let r: [i16x4; 2] = transmute(vld2_dup_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34761,7 +34761,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_s32() { let a: [i32; 5] = [0, 1, 1, 2, 3]; - let e: [i32x2; 2] = [i32x2::new(1, 1), i32x2::new(1, 1)]; + let e: [i32x2; 2] = [i32x2::new([1, 1]), i32x2::new([1, 1])]; let r: [i32x2; 2] = transmute(vld2_dup_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34769,7 +34769,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_s8() { let a: [i8; 33] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17]; - let e: [i8x16; 2] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x16; 2] = [i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x16; 2] = transmute(vld2q_dup_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34777,7 +34777,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_s16() { let a: [i16; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9]; - let e: [i16x8; 2] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i16x8; 2] = [i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i16x8; 2] = transmute(vld2q_dup_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34785,7 +34785,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_s32() { let a: [i32; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5]; - let e: [i32x4; 2] = [i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1)]; + let e: [i32x4; 2] = [i32x4::new([1, 1, 1, 1]), i32x4::new([1, 1, 1, 1])]; let r: [i32x4; 2] = transmute(vld2q_dup_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34793,7 +34793,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_s64() { let a: [i64; 3] = [0, 1, 1]; - let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(1)]; + let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([1])]; let r: [i64x1; 2] = 
transmute(vld2_dup_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34801,7 +34801,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_u8() { let a: [u8; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9]; - let e: [u8x8; 2] = [u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u8x8; 2] = [u8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u8x8; 2] = transmute(vld2_dup_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34809,7 +34809,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_u16() { let a: [u16; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5]; - let e: [u16x4; 2] = [u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1)]; + let e: [u16x4; 2] = [u16x4::new([1, 1, 1, 1]), u16x4::new([1, 1, 1, 1])]; let r: [u16x4; 2] = transmute(vld2_dup_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34817,7 +34817,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_u32() { let a: [u32; 5] = [0, 1, 1, 2, 3]; - let e: [u32x2; 2] = [u32x2::new(1, 1), u32x2::new(1, 1)]; + let e: [u32x2; 2] = [u32x2::new([1, 1]), u32x2::new([1, 1])]; let r: [u32x2; 2] = transmute(vld2_dup_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34825,7 +34825,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_u8() { let a: [u8; 33] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17]; - let e: [u8x16; 2] = [u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u8x16; 2] = [u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u8x16; 2] = transmute(vld2q_dup_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34833,7 +34833,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_u16() { let a: [u16; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9]; - let e: [u16x8; 2] = [u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u16x8; 2] = [u16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u16x8; 2] = transmute(vld2q_dup_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34841,7 +34841,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_u32() { let a: [u32; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5]; - let e: [u32x4; 2] = [u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1)]; + let e: [u32x4; 2] = [u32x4::new([1, 1, 1, 1]), u32x4::new([1, 1, 1, 1])]; let r: [u32x4; 2] = transmute(vld2q_dup_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34849,7 +34849,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_p8() { let a: [u8; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x8; 2] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x8; 2] = [i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x8; 2] = transmute(vld2_dup_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34857,7 +34857,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_p16() { let a: [u16; 9] = [0, 1, 1, 2, 3, 1, 4, 3, 5]; - let e: [i16x4; 2] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)]; + let e: [i16x4; 2] = [i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1])]; let r: [i16x4; 2] = transmute(vld2_dup_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34865,7 +34865,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_p8() { let a: [u8; 
33] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17]; - let e: [i8x16; 2] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x16; 2] = [i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x16; 2] = transmute(vld2q_dup_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34873,7 +34873,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_p16() { let a: [u16; 17] = [0, 1, 1, 2, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9]; - let e: [i16x8; 2] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i16x8; 2] = [i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i16x8; 2] = transmute(vld2q_dup_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34881,7 +34881,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_u64() { let a: [u64; 3] = [0, 1, 1]; - let e: [u64x1; 2] = [u64x1::new(1), u64x1::new(1)]; + let e: [u64x1; 2] = [u64x1::new([1]), u64x1::new([1])]; let r: [u64x1; 2] = transmute(vld2_dup_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34889,7 +34889,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_p64() { let a: [u64; 3] = [0, 1, 1]; - let e: [i64x1; 2] = [i64x1::new(1), i64x1::new(1)]; + let e: [i64x1; 2] = [i64x1::new([1]), i64x1::new([1])]; let r: [i64x1; 2] = transmute(vld2_dup_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34897,7 +34897,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_dup_f32() { let a: [f32; 5] = [0., 1., 1., 2., 3.]; - let e: [f32x2; 2] = [f32x2::new(1., 1.), f32x2::new(1., 1.)]; + let e: [f32x2; 2] = [f32x2::new([1., 1.]), f32x2::new([1., 1.])]; let r: [f32x2; 2] = transmute(vld2_dup_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34905,7 +34905,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_dup_f32() { let a: [f32; 9] = [0., 1., 1., 2., 3., 1., 4., 3., 5.]; - let e: [f32x4; 2] = [f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.)]; + let e: [f32x4; 2] = [f32x4::new([1., 1., 1., 1.]), f32x4::new([1., 1., 1., 1.])]; let r: [f32x4; 2] = transmute(vld2q_dup_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -34913,8 +34913,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_s8() { let a: [i8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x8; 2] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i8x8; 2] = [i8x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i8x8; 2] = [i8x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i8x8; 2] = transmute(vld2_lane_s8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34922,8 +34922,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_s16() { let a: [i16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x4; 2] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18)]; - let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18)]; + let b: [i16x4; 2] = [i16x4::new([0, 2, 2, 14]), i16x4::new([2, 16, 17, 18])]; + let e: [i16x4; 2] = [i16x4::new([1, 2, 2, 14]), i16x4::new([2, 16, 17, 18])]; let r: [i16x4; 2] = transmute(vld2_lane_s16::<0>(a[1..].as_ptr(), 
transmute(b))); assert_eq!(r, e); } @@ -34931,8 +34931,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_s32() { let a: [i32; 5] = [0, 1, 2, 3, 4]; - let b: [i32x2; 2] = [i32x2::new(0, 2), i32x2::new(2, 14)]; - let e: [i32x2; 2] = [i32x2::new(1, 2), i32x2::new(2, 14)]; + let b: [i32x2; 2] = [i32x2::new([0, 2]), i32x2::new([2, 14])]; + let e: [i32x2; 2] = [i32x2::new([1, 2]), i32x2::new([2, 14])]; let r: [i32x2; 2] = transmute(vld2_lane_s32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34940,8 +34940,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_s16() { let a: [i16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x8; 2] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i16x8; 2] = [i16x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i16x8; 2] = [i16x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i16x8; 2] = transmute(vld2q_lane_s16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34949,8 +34949,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_s32() { let a: [i32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i32x4; 2] = [i32x4::new(0, 2, 2, 14), i32x4::new(2, 16, 17, 18)]; - let e: [i32x4; 2] = [i32x4::new(1, 2, 2, 14), i32x4::new(2, 16, 17, 18)]; + let b: [i32x4; 2] = [i32x4::new([0, 2, 2, 14]), i32x4::new([2, 16, 17, 18])]; + let e: [i32x4; 2] = [i32x4::new([1, 2, 2, 14]), i32x4::new([2, 16, 17, 18])]; let r: [i32x4; 2] = transmute(vld2q_lane_s32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34958,8 +34958,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_u8() { let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u8x8; 2] = [u8x8::new(0, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [u8x8; 2] = [u8x8::new(1, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [u8x8; 2] = [u8x8::new([0, 2, 2, 14, 2, 16, 17, 18]), u8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [u8x8; 2] = [u8x8::new([1, 2, 2, 14, 2, 16, 17, 18]), u8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [u8x8; 2] = transmute(vld2_lane_u8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34967,8 +34967,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_u16() { let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u16x4; 2] = [u16x4::new(0, 2, 2, 14), u16x4::new(2, 16, 17, 18)]; - let e: [u16x4; 2] = [u16x4::new(1, 2, 2, 14), u16x4::new(2, 16, 17, 18)]; + let b: [u16x4; 2] = [u16x4::new([0, 2, 2, 14]), u16x4::new([2, 16, 17, 18])]; + let e: [u16x4; 2] = [u16x4::new([1, 2, 2, 14]), u16x4::new([2, 16, 17, 18])]; let r: [u16x4; 2] = transmute(vld2_lane_u16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34976,8 +34976,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_u32() { let a: [u32; 5] = [0, 1, 2, 3, 4]; - let b: [u32x2; 2] = [u32x2::new(0, 2), u32x2::new(2, 14)]; - let e: [u32x2; 2] = [u32x2::new(1, 2), u32x2::new(2, 14)]; + let b: [u32x2; 2] = [u32x2::new([0, 2]), u32x2::new([2, 14])]; + let e: [u32x2; 2] = [u32x2::new([1, 2]), u32x2::new([2, 14])]; let r: [u32x2; 2] = transmute(vld2_lane_u32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34985,8 +34985,8 
@@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_u16() { let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u16x8; 2] = [u16x8::new(0, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [u16x8; 2] = [u16x8::new(1, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [u16x8; 2] = [u16x8::new([0, 2, 2, 14, 2, 16, 17, 18]), u16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [u16x8; 2] = [u16x8::new([1, 2, 2, 14, 2, 16, 17, 18]), u16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [u16x8; 2] = transmute(vld2q_lane_u16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -34994,8 +34994,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_u32() { let a: [u32; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u32x4; 2] = [u32x4::new(0, 2, 2, 14), u32x4::new(2, 16, 17, 18)]; - let e: [u32x4; 2] = [u32x4::new(1, 2, 2, 14), u32x4::new(2, 16, 17, 18)]; + let b: [u32x4; 2] = [u32x4::new([0, 2, 2, 14]), u32x4::new([2, 16, 17, 18])]; + let e: [u32x4; 2] = [u32x4::new([1, 2, 2, 14]), u32x4::new([2, 16, 17, 18])]; let r: [u32x4; 2] = transmute(vld2q_lane_u32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35003,8 +35003,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_p8() { let a: [u8; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x8; 2] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i8x8; 2] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i8x8; 2] = [i8x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i8x8; 2] = [i8x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i8x8; 2] = transmute(vld2_lane_p8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35012,8 +35012,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_p16() { let a: [u16; 9] = [0, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x4; 2] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18)]; - let e: [i16x4; 2] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18)]; + let b: [i16x4; 2] = [i16x4::new([0, 2, 2, 14]), i16x4::new([2, 16, 17, 18])]; + let e: [i16x4; 2] = [i16x4::new([1, 2, 2, 14]), i16x4::new([2, 16, 17, 18])]; let r: [i16x4; 2] = transmute(vld2_lane_p16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35021,8 +35021,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_p16() { let a: [u16; 17] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x8; 2] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i16x8; 2] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i16x8; 2] = [i16x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i16x8; 2] = [i16x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i16x8; 2] = transmute(vld2q_lane_p16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35030,8 +35030,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2_lane_f32() { let a: [f32; 5] = [0., 1., 2., 3., 4.]; - let b: [f32x2; 2] = [f32x2::new(0., 2.), f32x2::new(2., 14.)]; - let e: [f32x2; 2] = [f32x2::new(1., 2.), f32x2::new(2., 14.)]; + let b: [f32x2; 2] = [f32x2::new([0., 2.]), f32x2::new([2., 14.])]; + let 
e: [f32x2; 2] = [f32x2::new([1., 2.]), f32x2::new([2., 14.])]; let r: [f32x2; 2] = transmute(vld2_lane_f32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35039,8 +35039,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld2q_lane_f32() { let a: [f32; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; - let b: [f32x4; 2] = [f32x4::new(0., 2., 2., 14.), f32x4::new(2., 16., 17., 18.)]; - let e: [f32x4; 2] = [f32x4::new(1., 2., 2., 14.), f32x4::new(2., 16., 17., 18.)]; + let b: [f32x4; 2] = [f32x4::new([0., 2., 2., 14.]), f32x4::new([2., 16., 17., 18.])]; + let e: [f32x4; 2] = [f32x4::new([1., 2., 2., 14.]), f32x4::new([2., 16., 17., 18.])]; let r: [f32x4; 2] = transmute(vld2q_lane_f32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35048,7 +35048,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_s8() { let a: [i8; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16]; - let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 4, 2, 4, 7, 8), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16)]; + let e: [i8x8; 3] = [i8x8::new([1, 2, 2, 4, 2, 4, 7, 8]), i8x8::new([2, 4, 7, 8, 13, 14, 15, 16]), i8x8::new([2, 4, 7, 8, 13, 14, 15, 16])]; let r: [i8x8; 3] = transmute(vld3_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35056,7 +35056,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_s16() { let a: [i16; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8]; - let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 4), i16x4::new(2, 4, 7, 8), i16x4::new(2, 4, 7, 8)]; + let e: [i16x4; 3] = [i16x4::new([1, 2, 2, 4]), i16x4::new([2, 4, 7, 8]), i16x4::new([2, 4, 7, 8])]; let r: [i16x4; 3] = transmute(vld3_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35064,7 +35064,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_s32() { let a: [i32; 7] = [0, 1, 2, 2, 2, 4, 4]; - let e: [i32x2; 3] = [i32x2::new(1, 2), i32x2::new(2, 4), i32x2::new(2, 4)]; + let e: [i32x2; 3] = [i32x2::new([1, 2]), i32x2::new([2, 4]), i32x2::new([2, 4])]; let r: [i32x2; 3] = transmute(vld3_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35072,7 +35072,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_s8() { let a: [i8; 49] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48]; - let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48)]; + let e: [i8x16; 3] = [i8x16::new([1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16]), i8x16::new([2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32]), i8x16::new([2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48])]; let r: [i8x16; 3] = transmute(vld3q_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35080,7 +35080,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_s16() { let a: [i16; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16]; - let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 4, 2, 4, 7, 8), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16)]; + let e: [i16x8; 3] = [i16x8::new([1, 2, 2, 4, 2, 4, 7, 8]), i16x8::new([2, 4, 7, 8, 13, 14, 15, 16]), i16x8::new([2, 4, 7, 8, 13, 14, 15, 16])]; let r: [i16x8; 3] = transmute(vld3q_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35088,7 +35088,7 @@ mod test { 
#[simd_test(enable = "neon")] unsafe fn test_vld3q_s32() { let a: [i32; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8]; - let e: [i32x4; 3] = [i32x4::new(1, 2, 2, 4), i32x4::new(2, 4, 7, 8), i32x4::new(2, 4, 7, 8)]; + let e: [i32x4; 3] = [i32x4::new([1, 2, 2, 4]), i32x4::new([2, 4, 7, 8]), i32x4::new([2, 4, 7, 8])]; let r: [i32x4; 3] = transmute(vld3q_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35096,7 +35096,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_s64() { let a: [i64; 4] = [0, 1, 2, 2]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2])]; let r: [i64x1; 3] = transmute(vld3_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35104,7 +35104,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_u8() { let a: [u8; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16]; - let e: [u8x8; 3] = [u8x8::new(1, 2, 2, 4, 2, 4, 7, 8), u8x8::new(2, 4, 7, 8, 13, 14, 15, 16), u8x8::new(2, 4, 7, 8, 13, 14, 15, 16)]; + let e: [u8x8; 3] = [u8x8::new([1, 2, 2, 4, 2, 4, 7, 8]), u8x8::new([2, 4, 7, 8, 13, 14, 15, 16]), u8x8::new([2, 4, 7, 8, 13, 14, 15, 16])]; let r: [u8x8; 3] = transmute(vld3_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35112,7 +35112,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_u16() { let a: [u16; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8]; - let e: [u16x4; 3] = [u16x4::new(1, 2, 2, 4), u16x4::new(2, 4, 7, 8), u16x4::new(2, 4, 7, 8)]; + let e: [u16x4; 3] = [u16x4::new([1, 2, 2, 4]), u16x4::new([2, 4, 7, 8]), u16x4::new([2, 4, 7, 8])]; let r: [u16x4; 3] = transmute(vld3_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35120,7 +35120,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_u32() { let a: [u32; 7] = [0, 1, 2, 2, 2, 4, 4]; - let e: [u32x2; 3] = [u32x2::new(1, 2), u32x2::new(2, 4), u32x2::new(2, 4)]; + let e: [u32x2; 3] = [u32x2::new([1, 2]), u32x2::new([2, 4]), u32x2::new([2, 4])]; let r: [u32x2; 3] = transmute(vld3_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35128,7 +35128,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_u8() { let a: [u8; 49] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48]; - let e: [u8x16; 3] = [u8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16), u8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32), u8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48)]; + let e: [u8x16; 3] = [u8x16::new([1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16]), u8x16::new([2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32]), u8x16::new([2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48])]; let r: [u8x16; 3] = transmute(vld3q_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35136,7 +35136,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_u16() { let a: [u16; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16]; - let e: [u16x8; 3] = [u16x8::new(1, 2, 2, 4, 2, 4, 7, 8), u16x8::new(2, 4, 7, 8, 13, 14, 15, 16), u16x8::new(2, 4, 7, 8, 13, 14, 15, 16)]; + let e: [u16x8; 3] = [u16x8::new([1, 2, 2, 4, 2, 4, 7, 8]), u16x8::new([2, 4, 7, 8, 13, 14, 15, 16]), u16x8::new([2, 4, 7, 8, 13, 14, 15, 16])]; let r: [u16x8; 3] = transmute(vld3q_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35144,7 +35144,7 @@ mod test { #[simd_test(enable = 
"neon")] unsafe fn test_vld3q_u32() { let a: [u32; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8]; - let e: [u32x4; 3] = [u32x4::new(1, 2, 2, 4), u32x4::new(2, 4, 7, 8), u32x4::new(2, 4, 7, 8)]; + let e: [u32x4; 3] = [u32x4::new([1, 2, 2, 4]), u32x4::new([2, 4, 7, 8]), u32x4::new([2, 4, 7, 8])]; let r: [u32x4; 3] = transmute(vld3q_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35152,7 +35152,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_p8() { let a: [u8; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16]; - let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 4, 2, 4, 7, 8), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16), i8x8::new(2, 4, 7, 8, 13, 14, 15, 16)]; + let e: [i8x8; 3] = [i8x8::new([1, 2, 2, 4, 2, 4, 7, 8]), i8x8::new([2, 4, 7, 8, 13, 14, 15, 16]), i8x8::new([2, 4, 7, 8, 13, 14, 15, 16])]; let r: [i8x8; 3] = transmute(vld3_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35160,7 +35160,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_p16() { let a: [u16; 13] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8]; - let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 4), i16x4::new(2, 4, 7, 8), i16x4::new(2, 4, 7, 8)]; + let e: [i16x4; 3] = [i16x4::new([1, 2, 2, 4]), i16x4::new([2, 4, 7, 8]), i16x4::new([2, 4, 7, 8])]; let r: [i16x4; 3] = transmute(vld3_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35168,7 +35168,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_p8() { let a: [u8; 49] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16, 2, 25, 41, 4, 26, 42, 7, 27, 43, 8, 28, 44, 13, 29, 45, 14, 30, 46, 15, 31, 47, 16, 32, 48]; - let e: [i8x16; 3] = [i8x16::new(1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32), i8x16::new(2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48)]; + let e: [i8x16; 3] = [i8x16::new([1, 2, 2, 4, 2, 4, 7, 8, 2, 4, 7, 8, 13, 14, 15, 16]), i8x16::new([2, 4, 7, 8, 13, 14, 15, 16, 25, 26, 27, 28, 29, 30, 31, 32]), i8x16::new([2, 4, 7, 8, 13, 14, 15, 16, 41, 42, 43, 44, 45, 46, 47, 48])]; let r: [i8x16; 3] = transmute(vld3q_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35176,7 +35176,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_p16() { let a: [u16; 25] = [0, 1, 2, 2, 2, 4, 4, 2, 7, 7, 4, 8, 8, 2, 13, 13, 4, 14, 14, 7, 15, 15, 8, 16, 16]; - let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 4, 2, 4, 7, 8), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16), i16x8::new(2, 4, 7, 8, 13, 14, 15, 16)]; + let e: [i16x8; 3] = [i16x8::new([1, 2, 2, 4, 2, 4, 7, 8]), i16x8::new([2, 4, 7, 8, 13, 14, 15, 16]), i16x8::new([2, 4, 7, 8, 13, 14, 15, 16])]; let r: [i16x8; 3] = transmute(vld3q_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35184,7 +35184,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_u64() { let a: [u64; 4] = [0, 1, 2, 2]; - let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(2), u64x1::new(2)]; + let e: [u64x1; 3] = [u64x1::new([1]), u64x1::new([2]), u64x1::new([2])]; let r: [u64x1; 3] = transmute(vld3_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35192,7 +35192,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_p64() { let a: [u64; 4] = [0, 1, 2, 2]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(2), i64x1::new(2)]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2])]; let r: [i64x1; 3] = transmute(vld3_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35200,7 +35200,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_f32() { let a: [f32; 7] 
= [0., 1., 2., 2., 2., 4., 4.]; - let e: [f32x2; 3] = [f32x2::new(1., 2.), f32x2::new(2., 4.), f32x2::new(2., 4.)]; + let e: [f32x2; 3] = [f32x2::new([1., 2.]), f32x2::new([2., 4.]), f32x2::new([2., 4.])]; let r: [f32x2; 3] = transmute(vld3_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35208,7 +35208,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_f32() { let a: [f32; 13] = [0., 1., 2., 2., 2., 4., 4., 2., 7., 7., 4., 8., 8.]; - let e: [f32x4; 3] = [f32x4::new(1., 2., 2., 4.), f32x4::new(2., 4., 7., 8.), f32x4::new(2., 4., 7., 8.)]; + let e: [f32x4; 3] = [f32x4::new([1., 2., 2., 4.]), f32x4::new([2., 4., 7., 8.]), f32x4::new([2., 4., 7., 8.])]; let r: [f32x4; 3] = transmute(vld3q_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35216,7 +35216,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_s8() { let a: [i8; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13]; - let e: [i8x8; 3] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x8; 3] = [i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x8; 3] = transmute(vld3_dup_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35224,7 +35224,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_s16() { let a: [i16; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7]; - let e: [i16x4; 3] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)]; + let e: [i16x4; 3] = [i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1])]; let r: [i16x4; 3] = transmute(vld3_dup_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35232,7 +35232,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_s32() { let a: [i32; 7] = [0, 1, 1, 1, 3, 1, 4]; - let e: [i32x2; 3] = [i32x2::new(1, 1), i32x2::new(1, 1), i32x2::new(1, 1)]; + let e: [i32x2; 3] = [i32x2::new([1, 1]), i32x2::new([1, 1]), i32x2::new([1, 1])]; let r: [i32x2; 3] = transmute(vld3_dup_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35240,7 +35240,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_s8() { let a: [i8; 49] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17]; - let e: [i8x16; 3] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x16; 3] = [i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x16; 3] = transmute(vld3q_dup_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35248,7 +35248,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_s16() { let a: [i16; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13]; - let e: [i16x8; 3] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i16x8; 3] = [i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i16x8; 3] = transmute(vld3q_dup_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35256,7 +35256,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_s32() { let a: [i32; 13] = 
[0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7]; - let e: [i32x4; 3] = [i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1)]; + let e: [i32x4; 3] = [i32x4::new([1, 1, 1, 1]), i32x4::new([1, 1, 1, 1]), i32x4::new([1, 1, 1, 1])]; let r: [i32x4; 3] = transmute(vld3q_dup_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35264,7 +35264,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_s64() { let a: [i64; 4] = [0, 1, 1, 1]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(1), i64x1::new(1)]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([1]), i64x1::new([1])]; let r: [i64x1; 3] = transmute(vld3_dup_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35272,7 +35272,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_u8() { let a: [u8; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13]; - let e: [u8x8; 3] = [u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u8x8; 3] = [u8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u8x8; 3] = transmute(vld3_dup_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35280,7 +35280,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_u16() { let a: [u16; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7]; - let e: [u16x4; 3] = [u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1)]; + let e: [u16x4; 3] = [u16x4::new([1, 1, 1, 1]), u16x4::new([1, 1, 1, 1]), u16x4::new([1, 1, 1, 1])]; let r: [u16x4; 3] = transmute(vld3_dup_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35288,7 +35288,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_u32() { let a: [u32; 7] = [0, 1, 1, 1, 3, 1, 4]; - let e: [u32x2; 3] = [u32x2::new(1, 1), u32x2::new(1, 1), u32x2::new(1, 1)]; + let e: [u32x2; 3] = [u32x2::new([1, 1]), u32x2::new([1, 1]), u32x2::new([1, 1])]; let r: [u32x2; 3] = transmute(vld3_dup_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35296,7 +35296,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_u8() { let a: [u8; 49] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17]; - let e: [u8x16; 3] = [u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u8x16; 3] = [u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u8x16; 3] = transmute(vld3q_dup_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35304,7 +35304,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_u16() { let a: [u16; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13]; - let e: [u16x8; 3] = [u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u16x8; 3] = [u16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u16x8; 3] = transmute(vld3q_dup_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35312,7 +35312,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_u32() { let a: [u32; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7]; - let e: [u32x4; 3] = [u32x4::new(1, 
1, 1, 1), u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1)]; + let e: [u32x4; 3] = [u32x4::new([1, 1, 1, 1]), u32x4::new([1, 1, 1, 1]), u32x4::new([1, 1, 1, 1])]; let r: [u32x4; 3] = transmute(vld3q_dup_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35320,7 +35320,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_p8() { let a: [u8; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13]; - let e: [i8x8; 3] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x8; 3] = [i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x8; 3] = transmute(vld3_dup_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35328,7 +35328,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_p16() { let a: [u16; 13] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7]; - let e: [i16x4; 3] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)]; + let e: [i16x4; 3] = [i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1])]; let r: [i16x4; 3] = transmute(vld3_dup_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35336,7 +35336,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_p8() { let a: [u8; 49] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17, 6, 14, 7, 15, 8, 16, 9, 17]; - let e: [i8x16; 3] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x16; 3] = [i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x16; 3] = transmute(vld3q_dup_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35344,7 +35344,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_p16() { let a: [u16; 25] = [0, 1, 1, 1, 3, 1, 4, 3, 5, 1, 6, 3, 7, 4, 8, 5, 9, 2, 10, 3, 11, 4, 12, 5, 13]; - let e: [i16x8; 3] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i16x8; 3] = [i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i16x8; 3] = transmute(vld3q_dup_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35352,7 +35352,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_u64() { let a: [u64; 4] = [0, 1, 1, 1]; - let e: [u64x1; 3] = [u64x1::new(1), u64x1::new(1), u64x1::new(1)]; + let e: [u64x1; 3] = [u64x1::new([1]), u64x1::new([1]), u64x1::new([1])]; let r: [u64x1; 3] = transmute(vld3_dup_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35360,7 +35360,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_p64() { let a: [u64; 4] = [0, 1, 1, 1]; - let e: [i64x1; 3] = [i64x1::new(1), i64x1::new(1), i64x1::new(1)]; + let e: [i64x1; 3] = [i64x1::new([1]), i64x1::new([1]), i64x1::new([1])]; let r: [i64x1; 3] = transmute(vld3_dup_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35368,7 +35368,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_dup_f32() { let a: [f32; 7] = [0., 1., 1., 1., 3., 1., 4.]; - let e: [f32x2; 3] = [f32x2::new(1., 1.), f32x2::new(1., 1.), f32x2::new(1., 1.)]; + let e: [f32x2; 3] = [f32x2::new([1., 1.]), f32x2::new([1., 1.]), 
f32x2::new([1., 1.])]; let r: [f32x2; 3] = transmute(vld3_dup_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35376,7 +35376,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_dup_f32() { let a: [f32; 13] = [0., 1., 1., 1., 3., 1., 4., 3., 5., 1., 4., 3., 5.]; - let e: [f32x4; 3] = [f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.)]; + let e: [f32x4; 3] = [f32x4::new([1., 1., 1., 1.]), f32x4::new([1., 1., 1., 1.]), f32x4::new([1., 1., 1., 1.])]; let r: [f32x4; 3] = transmute(vld3q_dup_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35384,8 +35384,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_s8() { let a: [i8; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x8; 3] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 17, 18)]; - let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 17, 18)]; + let b: [i8x8; 3] = [i8x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i8x8::new([11, 12, 13, 14, 15, 16, 17, 18])]; + let e: [i8x8; 3] = [i8x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i8x8::new([2, 12, 13, 14, 15, 16, 17, 18])]; let r: [i8x8; 3] = transmute(vld3_lane_s8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35393,8 +35393,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_s16() { let a: [i16; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4]; - let b: [i16x4; 3] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)]; - let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)]; + let b: [i16x4; 3] = [i16x4::new([0, 2, 2, 14]), i16x4::new([2, 16, 17, 18]), i16x4::new([2, 20, 21, 22])]; + let e: [i16x4; 3] = [i16x4::new([1, 2, 2, 14]), i16x4::new([2, 16, 17, 18]), i16x4::new([2, 20, 21, 22])]; let r: [i16x4; 3] = transmute(vld3_lane_s16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35402,8 +35402,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_s32() { let a: [i32; 7] = [0, 1, 2, 2, 4, 5, 6]; - let b: [i32x2; 3] = [i32x2::new(0, 2), i32x2::new(2, 14), i32x2::new(2, 16)]; - let e: [i32x2; 3] = [i32x2::new(1, 2), i32x2::new(2, 14), i32x2::new(2, 16)]; + let b: [i32x2; 3] = [i32x2::new([0, 2]), i32x2::new([2, 14]), i32x2::new([2, 16])]; + let e: [i32x2; 3] = [i32x2::new([1, 2]), i32x2::new([2, 14]), i32x2::new([2, 16])]; let r: [i32x2; 3] = transmute(vld3_lane_s32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35411,8 +35411,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_s16() { let a: [i16; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x8; 3] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 17, 18)]; - let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 17, 18)]; + let b: [i16x8; 3] = [i16x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i16x8::new([11, 12, 13, 14, 15, 16, 17, 18])]; + let e: [i16x8; 3] = [i16x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i16x8::new([2, 12, 13, 14, 15, 16, 17, 18])]; let r: [i16x8; 3] 
= transmute(vld3q_lane_s16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35420,8 +35420,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_s32() { let a: [i32; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4]; - let b: [i32x4; 3] = [i32x4::new(0, 2, 2, 14), i32x4::new(2, 16, 17, 18), i32x4::new(2, 20, 21, 22)]; - let e: [i32x4; 3] = [i32x4::new(1, 2, 2, 14), i32x4::new(2, 16, 17, 18), i32x4::new(2, 20, 21, 22)]; + let b: [i32x4; 3] = [i32x4::new([0, 2, 2, 14]), i32x4::new([2, 16, 17, 18]), i32x4::new([2, 20, 21, 22])]; + let e: [i32x4; 3] = [i32x4::new([1, 2, 2, 14]), i32x4::new([2, 16, 17, 18]), i32x4::new([2, 20, 21, 22])]; let r: [i32x4; 3] = transmute(vld3q_lane_s32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35429,8 +35429,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_u8() { let a: [u8; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u8x8; 3] = [u8x8::new(0, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26), u8x8::new(11, 12, 13, 14, 15, 16, 17, 18)]; - let e: [u8x8; 3] = [u8x8::new(1, 2, 2, 14, 2, 16, 17, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26), u8x8::new(2, 12, 13, 14, 15, 16, 17, 18)]; + let b: [u8x8; 3] = [u8x8::new([0, 2, 2, 14, 2, 16, 17, 18]), u8x8::new([2, 20, 21, 22, 23, 24, 25, 26]), u8x8::new([11, 12, 13, 14, 15, 16, 17, 18])]; + let e: [u8x8; 3] = [u8x8::new([1, 2, 2, 14, 2, 16, 17, 18]), u8x8::new([2, 20, 21, 22, 23, 24, 25, 26]), u8x8::new([2, 12, 13, 14, 15, 16, 17, 18])]; let r: [u8x8; 3] = transmute(vld3_lane_u8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35438,8 +35438,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_u16() { let a: [u16; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4]; - let b: [u16x4; 3] = [u16x4::new(0, 2, 2, 14), u16x4::new(2, 16, 17, 18), u16x4::new(2, 20, 21, 22)]; - let e: [u16x4; 3] = [u16x4::new(1, 2, 2, 14), u16x4::new(2, 16, 17, 18), u16x4::new(2, 20, 21, 22)]; + let b: [u16x4; 3] = [u16x4::new([0, 2, 2, 14]), u16x4::new([2, 16, 17, 18]), u16x4::new([2, 20, 21, 22])]; + let e: [u16x4; 3] = [u16x4::new([1, 2, 2, 14]), u16x4::new([2, 16, 17, 18]), u16x4::new([2, 20, 21, 22])]; let r: [u16x4; 3] = transmute(vld3_lane_u16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35447,8 +35447,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_u32() { let a: [u32; 7] = [0, 1, 2, 2, 4, 5, 6]; - let b: [u32x2; 3] = [u32x2::new(0, 2), u32x2::new(2, 14), u32x2::new(2, 16)]; - let e: [u32x2; 3] = [u32x2::new(1, 2), u32x2::new(2, 14), u32x2::new(2, 16)]; + let b: [u32x2; 3] = [u32x2::new([0, 2]), u32x2::new([2, 14]), u32x2::new([2, 16])]; + let e: [u32x2; 3] = [u32x2::new([1, 2]), u32x2::new([2, 14]), u32x2::new([2, 16])]; let r: [u32x2; 3] = transmute(vld3_lane_u32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35456,8 +35456,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_u16() { let a: [u16; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u16x8; 3] = [u16x8::new(0, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26), u16x8::new(11, 12, 13, 14, 15, 16, 17, 18)]; - let e: [u16x8; 3] = [u16x8::new(1, 2, 2, 14, 2, 16, 17, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26), u16x8::new(2, 12, 13, 14, 15, 16, 17, 18)]; + let b: [u16x8; 3] = [u16x8::new([0, 2, 2, 14, 2, 16, 17, 18]), u16x8::new([2, 20, 21, 22, 23, 24, 25, 26]), u16x8::new([11, 12, 13, 14, 
15, 16, 17, 18])]; + let e: [u16x8; 3] = [u16x8::new([1, 2, 2, 14, 2, 16, 17, 18]), u16x8::new([2, 20, 21, 22, 23, 24, 25, 26]), u16x8::new([2, 12, 13, 14, 15, 16, 17, 18])]; let r: [u16x8; 3] = transmute(vld3q_lane_u16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35465,8 +35465,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_u32() { let a: [u32; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4]; - let b: [u32x4; 3] = [u32x4::new(0, 2, 2, 14), u32x4::new(2, 16, 17, 18), u32x4::new(2, 20, 21, 22)]; - let e: [u32x4; 3] = [u32x4::new(1, 2, 2, 14), u32x4::new(2, 16, 17, 18), u32x4::new(2, 20, 21, 22)]; + let b: [u32x4; 3] = [u32x4::new([0, 2, 2, 14]), u32x4::new([2, 16, 17, 18]), u32x4::new([2, 20, 21, 22])]; + let e: [u32x4; 3] = [u32x4::new([1, 2, 2, 14]), u32x4::new([2, 16, 17, 18]), u32x4::new([2, 20, 21, 22])]; let r: [u32x4; 3] = transmute(vld3q_lane_u32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35474,8 +35474,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_p8() { let a: [u8; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x8; 3] = [i8x8::new(0, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 17, 18)]; - let e: [i8x8; 3] = [i8x8::new(1, 2, 2, 14, 2, 16, 17, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 17, 18)]; + let b: [i8x8; 3] = [i8x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i8x8::new([11, 12, 13, 14, 15, 16, 17, 18])]; + let e: [i8x8; 3] = [i8x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i8x8::new([2, 12, 13, 14, 15, 16, 17, 18])]; let r: [i8x8; 3] = transmute(vld3_lane_p8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35483,8 +35483,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_p16() { let a: [u16; 13] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4]; - let b: [i16x4; 3] = [i16x4::new(0, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)]; - let e: [i16x4; 3] = [i16x4::new(1, 2, 2, 14), i16x4::new(2, 16, 17, 18), i16x4::new(2, 20, 21, 22)]; + let b: [i16x4; 3] = [i16x4::new([0, 2, 2, 14]), i16x4::new([2, 16, 17, 18]), i16x4::new([2, 20, 21, 22])]; + let e: [i16x4; 3] = [i16x4::new([1, 2, 2, 14]), i16x4::new([2, 16, 17, 18]), i16x4::new([2, 20, 21, 22])]; let r: [i16x4; 3] = transmute(vld3_lane_p16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35492,8 +35492,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_p16() { let a: [u16; 25] = [0, 1, 2, 2, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x8; 3] = [i16x8::new(0, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 17, 18)]; - let e: [i16x8; 3] = [i16x8::new(1, 2, 2, 14, 2, 16, 17, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 17, 18)]; + let b: [i16x8; 3] = [i16x8::new([0, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i16x8::new([11, 12, 13, 14, 15, 16, 17, 18])]; + let e: [i16x8; 3] = [i16x8::new([1, 2, 2, 14, 2, 16, 17, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26]), i16x8::new([2, 12, 13, 14, 15, 16, 17, 18])]; let r: [i16x8; 3] = transmute(vld3q_lane_p16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35501,8 +35501,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3_lane_f32() { let a: 
[f32; 7] = [0., 1., 2., 2., 4., 5., 6.]; - let b: [f32x2; 3] = [f32x2::new(0., 2.), f32x2::new(2., 14.), f32x2::new(9., 16.)]; - let e: [f32x2; 3] = [f32x2::new(1., 2.), f32x2::new(2., 14.), f32x2::new(2., 16.)]; + let b: [f32x2; 3] = [f32x2::new([0., 2.]), f32x2::new([2., 14.]), f32x2::new([9., 16.])]; + let e: [f32x2; 3] = [f32x2::new([1., 2.]), f32x2::new([2., 14.]), f32x2::new([2., 16.])]; let r: [f32x2; 3] = transmute(vld3_lane_f32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35510,8 +35510,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld3q_lane_f32() { let a: [f32; 13] = [0., 1., 2., 2., 4., 5., 6., 7., 8., 5., 6., 7., 8.]; - let b: [f32x4; 3] = [f32x4::new(0., 2., 2., 14.), f32x4::new(9., 16., 17., 18.), f32x4::new(5., 6., 7., 8.)]; - let e: [f32x4; 3] = [f32x4::new(1., 2., 2., 14.), f32x4::new(2., 16., 17., 18.), f32x4::new(2., 6., 7., 8.)]; + let b: [f32x4; 3] = [f32x4::new([0., 2., 2., 14.]), f32x4::new([9., 16., 17., 18.]), f32x4::new([5., 6., 7., 8.])]; + let e: [f32x4; 3] = [f32x4::new([1., 2., 2., 14.]), f32x4::new([2., 16., 17., 18.]), f32x4::new([2., 6., 7., 8.])]; let r: [f32x4; 3] = transmute(vld3q_lane_f32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35519,7 +35519,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_s8() { let a: [i8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]; - let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 6, 2, 6, 6, 8), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(6, 8, 8, 16, 8, 16, 16, 32)]; + let e: [i8x8; 4] = [i8x8::new([1, 2, 2, 6, 2, 6, 6, 8]), i8x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i8x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i8x8::new([6, 8, 8, 16, 8, 16, 16, 32])]; let r: [i8x8; 4] = transmute(vld4_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35527,7 +35527,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_s16() { let a: [i16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]; - let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 6), i16x4::new(2, 6, 6, 8), i16x4::new(2, 6, 6, 8), i16x4::new(6, 8, 8, 16)]; + let e: [i16x4; 4] = [i16x4::new([1, 2, 2, 6]), i16x4::new([2, 6, 6, 8]), i16x4::new([2, 6, 6, 8]), i16x4::new([6, 8, 8, 16])]; let r: [i16x4; 4] = transmute(vld4_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35535,7 +35535,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_s32() { let a: [i32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8]; - let e: [i32x2; 4] = [i32x2::new(1, 2), i32x2::new(2, 6), i32x2::new(2, 6), i32x2::new(6, 8)]; + let e: [i32x2; 4] = [i32x2::new([1, 2]), i32x2::new([2, 6]), i32x2::new([2, 6]), i32x2::new([6, 8])]; let r: [i32x2; 4] = transmute(vld4_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35543,7 +35543,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_s8() { let a: [i8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64]; - let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48), i8x16::new(6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64)]; + let e: [i8x16; 4] = [i8x16::new([1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]), i8x16::new([2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 
32]), i8x16::new([2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48]), i8x16::new([6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64])]; let r: [i8x16; 4] = transmute(vld4q_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35551,7 +35551,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_s16() { let a: [i16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]; - let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 6, 2, 6, 6, 8), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(6, 8, 8, 16, 8, 16, 16, 32)]; + let e: [i16x8; 4] = [i16x8::new([1, 2, 2, 6, 2, 6, 6, 8]), i16x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i16x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i16x8::new([6, 8, 8, 16, 8, 16, 16, 32])]; let r: [i16x8; 4] = transmute(vld4q_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35559,7 +35559,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_s32() { let a: [i32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]; - let e: [i32x4; 4] = [i32x4::new(1, 2, 2, 6), i32x4::new(2, 6, 6, 8), i32x4::new(2, 6, 6, 8), i32x4::new(6, 8, 8, 16)]; + let e: [i32x4; 4] = [i32x4::new([1, 2, 2, 6]), i32x4::new([2, 6, 6, 8]), i32x4::new([2, 6, 6, 8]), i32x4::new([6, 8, 8, 16])]; let r: [i32x4; 4] = transmute(vld4q_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35567,7 +35567,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_s64() { let a: [i64; 5] = [0, 1, 2, 2, 6]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(6)]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2]), i64x1::new([6])]; let r: [i64x1; 4] = transmute(vld4_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35575,7 +35575,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_u8() { let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]; - let e: [u8x8; 4] = [u8x8::new(1, 2, 2, 6, 2, 6, 6, 8), u8x8::new(2, 6, 6, 8, 6, 8, 8, 16), u8x8::new(2, 6, 6, 8, 6, 8, 8, 16), u8x8::new(6, 8, 8, 16, 8, 16, 16, 32)]; + let e: [u8x8; 4] = [u8x8::new([1, 2, 2, 6, 2, 6, 6, 8]), u8x8::new([2, 6, 6, 8, 6, 8, 8, 16]), u8x8::new([2, 6, 6, 8, 6, 8, 8, 16]), u8x8::new([6, 8, 8, 16, 8, 16, 16, 32])]; let r: [u8x8; 4] = transmute(vld4_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35583,7 +35583,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_u16() { let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]; - let e: [u16x4; 4] = [u16x4::new(1, 2, 2, 6), u16x4::new(2, 6, 6, 8), u16x4::new(2, 6, 6, 8), u16x4::new(6, 8, 8, 16)]; + let e: [u16x4; 4] = [u16x4::new([1, 2, 2, 6]), u16x4::new([2, 6, 6, 8]), u16x4::new([2, 6, 6, 8]), u16x4::new([6, 8, 8, 16])]; let r: [u16x4; 4] = transmute(vld4_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35591,7 +35591,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_u32() { let a: [u32; 9] = [0, 1, 2, 2, 6, 2, 6, 6, 8]; - let e: [u32x2; 4] = [u32x2::new(1, 2), u32x2::new(2, 6), u32x2::new(2, 6), u32x2::new(6, 8)]; + let e: [u32x2; 4] = [u32x2::new([1, 2]), u32x2::new([2, 6]), u32x2::new([2, 6]), u32x2::new([6, 8])]; let r: [u32x2; 4] = transmute(vld4_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35599,7 +35599,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_u8() { let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 
8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64]; - let e: [u8x16; 4] = [u8x16::new(1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16), u8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32), u8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48), u8x16::new(6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64)]; + let e: [u8x16; 4] = [u8x16::new([1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]), u8x16::new([2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]), u8x16::new([2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48]), u8x16::new([6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64])]; let r: [u8x16; 4] = transmute(vld4q_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35607,7 +35607,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_u16() { let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]; - let e: [u16x8; 4] = [u16x8::new(1, 2, 2, 6, 2, 6, 6, 8), u16x8::new(2, 6, 6, 8, 6, 8, 8, 16), u16x8::new(2, 6, 6, 8, 6, 8, 8, 16), u16x8::new(6, 8, 8, 16, 8, 16, 16, 32)]; + let e: [u16x8; 4] = [u16x8::new([1, 2, 2, 6, 2, 6, 6, 8]), u16x8::new([2, 6, 6, 8, 6, 8, 8, 16]), u16x8::new([2, 6, 6, 8, 6, 8, 8, 16]), u16x8::new([6, 8, 8, 16, 8, 16, 16, 32])]; let r: [u16x8; 4] = transmute(vld4q_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35615,7 +35615,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_u32() { let a: [u32; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]; - let e: [u32x4; 4] = [u32x4::new(1, 2, 2, 6), u32x4::new(2, 6, 6, 8), u32x4::new(2, 6, 6, 8), u32x4::new(6, 8, 8, 16)]; + let e: [u32x4; 4] = [u32x4::new([1, 2, 2, 6]), u32x4::new([2, 6, 6, 8]), u32x4::new([2, 6, 6, 8]), u32x4::new([6, 8, 8, 16])]; let r: [u32x4; 4] = transmute(vld4q_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35623,7 +35623,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_p8() { let a: [u8; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]; - let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 6, 2, 6, 6, 8), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(2, 6, 6, 8, 6, 8, 8, 16), i8x8::new(6, 8, 8, 16, 8, 16, 16, 32)]; + let e: [i8x8; 4] = [i8x8::new([1, 2, 2, 6, 2, 6, 6, 8]), i8x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i8x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i8x8::new([6, 8, 8, 16, 8, 16, 16, 32])]; let r: [i8x8; 4] = transmute(vld4_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35631,7 +35631,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_p16() { let a: [u16; 17] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]; - let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 6), i16x4::new(2, 6, 6, 8), i16x4::new(2, 6, 6, 8), i16x4::new(6, 8, 8, 16)]; + let e: [i16x4; 4] = [i16x4::new([1, 2, 2, 6]), i16x4::new([2, 6, 6, 8]), i16x4::new([2, 6, 6, 8]), i16x4::new([6, 8, 8, 16])]; let r: [i16x4; 4] = transmute(vld4_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35639,7 +35639,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_p8() { let a: [u8; 65] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48, 6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64]; - let e: [i8x16; 4] = [i8x16::new(1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32), i8x16::new(2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 
43, 44, 8, 16, 44, 48), i8x16::new(6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64)]; + let e: [i8x16; 4] = [i8x16::new([1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16]), i8x16::new([2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]), i8x16::new([2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 43, 44, 8, 16, 44, 48]), i8x16::new([6, 8, 8, 16, 8, 16, 16, 32, 8, 16, 44, 48, 16, 32, 48, 64])]; let r: [i8x16; 4] = transmute(vld4q_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35647,7 +35647,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_p16() { let a: [u16; 33] = [0, 1, 2, 2, 6, 2, 6, 6, 8, 2, 6, 6, 8, 6, 8, 8, 16, 2, 6, 6, 8, 6, 8, 8, 16, 6, 8, 8, 16, 8, 16, 16, 32]; - let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 6, 2, 6, 6, 8), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(2, 6, 6, 8, 6, 8, 8, 16), i16x8::new(6, 8, 8, 16, 8, 16, 16, 32)]; + let e: [i16x8; 4] = [i16x8::new([1, 2, 2, 6, 2, 6, 6, 8]), i16x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i16x8::new([2, 6, 6, 8, 6, 8, 8, 16]), i16x8::new([6, 8, 8, 16, 8, 16, 16, 32])]; let r: [i16x8; 4] = transmute(vld4q_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35655,7 +35655,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_u64() { let a: [u64; 5] = [0, 1, 2, 2, 6]; - let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(2), u64x1::new(2), u64x1::new(6)]; + let e: [u64x1; 4] = [u64x1::new([1]), u64x1::new([2]), u64x1::new([2]), u64x1::new([6])]; let r: [u64x1; 4] = transmute(vld4_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35663,7 +35663,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_p64() { let a: [u64; 5] = [0, 1, 2, 2, 6]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(2), i64x1::new(2), i64x1::new(6)]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([2]), i64x1::new([2]), i64x1::new([6])]; let r: [i64x1; 4] = transmute(vld4_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35671,7 +35671,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_f32() { let a: [f32; 9] = [0., 1., 2., 2., 6., 2., 6., 6., 8.]; - let e: [f32x2; 4] = [f32x2::new(1., 2.), f32x2::new(2., 6.), f32x2::new(2., 6.), f32x2::new(6., 8.)]; + let e: [f32x2; 4] = [f32x2::new([1., 2.]), f32x2::new([2., 6.]), f32x2::new([2., 6.]), f32x2::new([6., 8.])]; let r: [f32x2; 4] = transmute(vld4_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35679,7 +35679,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_f32() { let a: [f32; 17] = [0., 1., 2., 2., 6., 2., 6., 6., 8., 2., 6., 6., 8., 6., 8., 15., 16.]; - let e: [f32x4; 4] = [f32x4::new(1., 2., 2., 6.), f32x4::new(2., 6., 6., 8.), f32x4::new(2., 6., 6., 15.), f32x4::new(6., 8., 8., 16.)]; + let e: [f32x4; 4] = [f32x4::new([1., 2., 2., 6.]), f32x4::new([2., 6., 6., 8.]), f32x4::new([2., 6., 6., 15.]), f32x4::new([6., 8., 8., 16.])]; let r: [f32x4; 4] = transmute(vld4q_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35687,7 +35687,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_s8() { let a: [i8; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x8; 4] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x8; 4] = [i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x8; 4] = transmute(vld4_dup_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35695,7 +35695,7 @@ mod test { 
#[simd_test(enable = "neon")] unsafe fn test_vld4_dup_s16() { let a: [i16; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i16x4; 4] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)]; + let e: [i16x4; 4] = [i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1])]; let r: [i16x4; 4] = transmute(vld4_dup_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35703,7 +35703,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_s32() { let a: [i32; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5]; - let e: [i32x2; 4] = [i32x2::new(1, 1), i32x2::new(1, 1), i32x2::new(1, 1), i32x2::new(1, 1)]; + let e: [i32x2; 4] = [i32x2::new([1, 1]), i32x2::new([1, 1]), i32x2::new([1, 1]), i32x2::new([1, 1])]; let r: [i32x2; 4] = transmute(vld4_dup_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35711,7 +35711,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_s8() { let a: [i8; 65] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x16; 4] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x16; 4] = [i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x16; 4] = transmute(vld4q_dup_s8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35719,7 +35719,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_s16() { let a: [i16; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i16x8; 4] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i16x8; 4] = [i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i16x8; 4] = transmute(vld4q_dup_s16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35727,7 +35727,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_s32() { let a: [i32; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i32x4; 4] = [i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1), i32x4::new(1, 1, 1, 1)]; + let e: [i32x4; 4] = [i32x4::new([1, 1, 1, 1]), i32x4::new([1, 1, 1, 1]), i32x4::new([1, 1, 1, 1]), i32x4::new([1, 1, 1, 1])]; let r: [i32x4; 4] = transmute(vld4q_dup_s32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35735,7 +35735,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_s64() { let a: [i64; 5] = [0, 1, 1, 1, 1]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(1), i64x1::new(1), i64x1::new(1)]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([1]), i64x1::new([1]), i64x1::new([1])]; let r: [i64x1; 4] = transmute(vld4_dup_s64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35743,7 +35743,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_u8() { let a: [u8; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 
7, 4, 8, 5, 9]; - let e: [u8x8; 4] = [u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1), u8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u8x8; 4] = [u8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u8x8; 4] = transmute(vld4_dup_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35751,7 +35751,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_u16() { let a: [u16; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [u16x4; 4] = [u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1), u16x4::new(1, 1, 1, 1)]; + let e: [u16x4; 4] = [u16x4::new([1, 1, 1, 1]), u16x4::new([1, 1, 1, 1]), u16x4::new([1, 1, 1, 1]), u16x4::new([1, 1, 1, 1])]; let r: [u16x4; 4] = transmute(vld4_dup_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35759,7 +35759,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_u32() { let a: [u32; 9] = [0, 1, 1, 1, 1, 2, 4, 3, 5]; - let e: [u32x2; 4] = [u32x2::new(1, 1), u32x2::new(1, 1), u32x2::new(1, 1), u32x2::new(1, 1)]; + let e: [u32x2; 4] = [u32x2::new([1, 1]), u32x2::new([1, 1]), u32x2::new([1, 1]), u32x2::new([1, 1])]; let r: [u32x2; 4] = transmute(vld4_dup_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35767,7 +35767,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_u8() { let a: [u8; 65] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [u8x16; 4] = [u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), u8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u8x16; 4] = [u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), u8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u8x16; 4] = transmute(vld4q_dup_u8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35775,7 +35775,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_u16() { let a: [u16; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [u16x8; 4] = [u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1), u16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [u16x8; 4] = [u16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), u16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [u16x8; 4] = transmute(vld4q_dup_u16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35783,7 +35783,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_u32() { let a: [u32; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [u32x4; 4] = [u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1), u32x4::new(1, 1, 1, 1)]; + let e: [u32x4; 4] = [u32x4::new([1, 1, 1, 1]), u32x4::new([1, 1, 1, 1]), u32x4::new([1, 1, 1, 1]), u32x4::new([1, 1, 1, 1])]; let r: [u32x4; 4] = transmute(vld4q_dup_u32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35791,7 +35791,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_p8() { let a: [u8; 33] = [0, 1, 1, 
1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x8; 4] = [i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1), i8x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x8; 4] = [i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i8x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x8; 4] = transmute(vld4_dup_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35799,7 +35799,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_p16() { let a: [u16; 17] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i16x4; 4] = [i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1), i16x4::new(1, 1, 1, 1)]; + let e: [i16x4; 4] = [i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1]), i16x4::new([1, 1, 1, 1])]; let r: [i16x4; 4] = transmute(vld4_dup_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35807,7 +35807,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_p8() { let a: [u8; 65] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i8x16; 4] = [i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), i8x16::new(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i8x16; 4] = [i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), i8x16::new([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i8x16; 4] = transmute(vld4q_dup_p8(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35815,7 +35815,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_p16() { let a: [u16; 33] = [0, 1, 1, 1, 1, 2, 4, 3, 5, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9, 8, 6, 3, 7, 4, 8, 5, 9]; - let e: [i16x8; 4] = [i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1), i16x8::new(1, 1, 1, 1, 1, 1, 1, 1)]; + let e: [i16x8; 4] = [i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1]), i16x8::new([1, 1, 1, 1, 1, 1, 1, 1])]; let r: [i16x8; 4] = transmute(vld4q_dup_p16(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35823,7 +35823,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_u64() { let a: [u64; 5] = [0, 1, 1, 1, 1]; - let e: [u64x1; 4] = [u64x1::new(1), u64x1::new(1), u64x1::new(1), u64x1::new(1)]; + let e: [u64x1; 4] = [u64x1::new([1]), u64x1::new([1]), u64x1::new([1]), u64x1::new([1])]; let r: [u64x1; 4] = transmute(vld4_dup_u64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35831,7 +35831,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_p64() { let a: [u64; 5] = [0, 1, 1, 1, 1]; - let e: [i64x1; 4] = [i64x1::new(1), i64x1::new(1), i64x1::new(1), i64x1::new(1)]; + let e: [i64x1; 4] = [i64x1::new([1]), i64x1::new([1]), i64x1::new([1]), i64x1::new([1])]; let r: [i64x1; 4] = transmute(vld4_dup_p64(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35839,7 +35839,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_dup_f32() { let a: [f32; 9] = [0., 1., 1., 1., 1., 6., 4., 3., 5.]; - let e: [f32x2; 4] = [f32x2::new(1., 1.), 
f32x2::new(1., 1.), f32x2::new(1., 1.), f32x2::new(1., 1.)]; + let e: [f32x2; 4] = [f32x2::new([1., 1.]), f32x2::new([1., 1.]), f32x2::new([1., 1.]), f32x2::new([1., 1.])]; let r: [f32x2; 4] = transmute(vld4_dup_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35847,7 +35847,7 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_dup_f32() { let a: [f32; 17] = [0., 1., 1., 1., 1., 6., 4., 3., 5., 7., 4., 3., 5., 8., 4., 3., 5.]; - let e: [f32x4; 4] = [f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.), f32x4::new(1., 1., 1., 1.)]; + let e: [f32x4; 4] = [f32x4::new([1., 1., 1., 1.]), f32x4::new([1., 1., 1., 1.]), f32x4::new([1., 1., 1., 1.]), f32x4::new([1., 1., 1., 1.])]; let r: [f32x4; 4] = transmute(vld4q_dup_f32(a[1..].as_ptr())); assert_eq!(r, e); } @@ -35855,8 +35855,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_s8() { let a: [i8; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x8; 4] = [i8x8::new(0, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i8x8; 4] = [i8x8::new([0, 2, 2, 2, 2, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i8x8::new([11, 12, 13, 14, 15, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i8x8; 4] = [i8x8::new([1, 2, 2, 2, 2, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i8x8::new([2, 12, 13, 14, 15, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i8x8; 4] = transmute(vld4_lane_s8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35864,8 +35864,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_s16() { let a: [i16; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x4; 4] = [i16x4::new(0, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)]; - let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)]; + let b: [i16x4; 4] = [i16x4::new([0, 2, 2, 2]), i16x4::new([2, 16, 2, 18]), i16x4::new([2, 20, 21, 22]), i16x4::new([2, 24, 25, 26])]; + let e: [i16x4; 4] = [i16x4::new([1, 2, 2, 2]), i16x4::new([2, 16, 2, 18]), i16x4::new([2, 20, 21, 22]), i16x4::new([2, 24, 25, 26])]; let r: [i16x4; 4] = transmute(vld4_lane_s16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35873,8 +35873,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_s32() { let a: [i32; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8]; - let b: [i32x2; 4] = [i32x2::new(0, 2), i32x2::new(2, 2), i32x2::new(2, 16), i32x2::new(2, 18)]; - let e: [i32x2; 4] = [i32x2::new(1, 2), i32x2::new(2, 2), i32x2::new(2, 16), i32x2::new(2, 18)]; + let b: [i32x2; 4] = [i32x2::new([0, 2]), i32x2::new([2, 2]), i32x2::new([2, 16]), i32x2::new([2, 18])]; + let e: [i32x2; 4] = [i32x2::new([1, 2]), i32x2::new([2, 2]), i32x2::new([2, 16]), i32x2::new([2, 18])]; let r: [i32x2; 4] = transmute(vld4_lane_s32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35882,8 +35882,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_s16() { let a: [i16; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let 
b: [i16x8; 4] = [i16x8::new(0, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i16x8; 4] = [i16x8::new([0, 2, 2, 2, 2, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i16x8::new([11, 12, 13, 14, 15, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i16x8; 4] = [i16x8::new([1, 2, 2, 2, 2, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i16x8::new([2, 12, 13, 14, 15, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i16x8; 4] = transmute(vld4q_lane_s16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35891,8 +35891,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_s32() { let a: [i32; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i32x4; 4] = [i32x4::new(0, 2, 2, 2), i32x4::new(2, 16, 2, 18), i32x4::new(2, 20, 21, 22), i32x4::new(2, 24, 25, 26)]; - let e: [i32x4; 4] = [i32x4::new(1, 2, 2, 2), i32x4::new(2, 16, 2, 18), i32x4::new(2, 20, 21, 22), i32x4::new(2, 24, 25, 26)]; + let b: [i32x4; 4] = [i32x4::new([0, 2, 2, 2]), i32x4::new([2, 16, 2, 18]), i32x4::new([2, 20, 21, 22]), i32x4::new([2, 24, 25, 26])]; + let e: [i32x4; 4] = [i32x4::new([1, 2, 2, 2]), i32x4::new([2, 16, 2, 18]), i32x4::new([2, 20, 21, 22]), i32x4::new([2, 24, 25, 26])]; let r: [i32x4; 4] = transmute(vld4q_lane_s32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35900,8 +35900,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_u8() { let a: [u8; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u8x8; 4] = [u8x8::new(0, 2, 2, 2, 2, 16, 2, 18), u8x8::new(2, 20, 21, 22, 2, 24, 25, 26), u8x8::new(11, 12, 13, 14, 15, 16, 2, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [u8x8; 4] = [u8x8::new(1, 2, 2, 2, 2, 16, 2, 18), u8x8::new(2, 20, 21, 22, 2, 24, 25, 26), u8x8::new(2, 12, 13, 14, 15, 16, 2, 18), u8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [u8x8; 4] = [u8x8::new([0, 2, 2, 2, 2, 16, 2, 18]), u8x8::new([2, 20, 21, 22, 2, 24, 25, 26]), u8x8::new([11, 12, 13, 14, 15, 16, 2, 18]), u8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [u8x8; 4] = [u8x8::new([1, 2, 2, 2, 2, 16, 2, 18]), u8x8::new([2, 20, 21, 22, 2, 24, 25, 26]), u8x8::new([2, 12, 13, 14, 15, 16, 2, 18]), u8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [u8x8; 4] = transmute(vld4_lane_u8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35909,8 +35909,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_u16() { let a: [u16; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u16x4; 4] = [u16x4::new(0, 2, 2, 2), u16x4::new(2, 16, 2, 18), u16x4::new(2, 20, 21, 22), u16x4::new(2, 24, 25, 26)]; - let e: [u16x4; 4] = [u16x4::new(1, 2, 2, 2), u16x4::new(2, 16, 2, 18), u16x4::new(2, 20, 21, 22), u16x4::new(2, 24, 25, 26)]; + let b: [u16x4; 4] = [u16x4::new([0, 2, 2, 2]), u16x4::new([2, 16, 2, 18]), u16x4::new([2, 20, 21, 22]), u16x4::new([2, 24, 25, 26])]; + let e: [u16x4; 4] = [u16x4::new([1, 2, 2, 2]), u16x4::new([2, 16, 2, 18]), u16x4::new([2, 20, 21, 22]), u16x4::new([2, 24, 25, 26])]; let r: [u16x4; 4] = transmute(vld4_lane_u16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35918,8 
+35918,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_u32() { let a: [u32; 9] = [0, 1, 2, 2, 2, 5, 6, 7, 8]; - let b: [u32x2; 4] = [u32x2::new(0, 2), u32x2::new(2, 2), u32x2::new(2, 16), u32x2::new(2, 18)]; - let e: [u32x2; 4] = [u32x2::new(1, 2), u32x2::new(2, 2), u32x2::new(2, 16), u32x2::new(2, 18)]; + let b: [u32x2; 4] = [u32x2::new([0, 2]), u32x2::new([2, 2]), u32x2::new([2, 16]), u32x2::new([2, 18])]; + let e: [u32x2; 4] = [u32x2::new([1, 2]), u32x2::new([2, 2]), u32x2::new([2, 16]), u32x2::new([2, 18])]; let r: [u32x2; 4] = transmute(vld4_lane_u32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35927,8 +35927,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_u16() { let a: [u16; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u16x8; 4] = [u16x8::new(0, 2, 2, 2, 2, 16, 2, 18), u16x8::new(2, 20, 21, 22, 2, 24, 25, 26), u16x8::new(11, 12, 13, 14, 15, 16, 2, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [u16x8; 4] = [u16x8::new(1, 2, 2, 2, 2, 16, 2, 18), u16x8::new(2, 20, 21, 22, 2, 24, 25, 26), u16x8::new(2, 12, 13, 14, 15, 16, 2, 18), u16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [u16x8; 4] = [u16x8::new([0, 2, 2, 2, 2, 16, 2, 18]), u16x8::new([2, 20, 21, 22, 2, 24, 25, 26]), u16x8::new([11, 12, 13, 14, 15, 16, 2, 18]), u16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [u16x8; 4] = [u16x8::new([1, 2, 2, 2, 2, 16, 2, 18]), u16x8::new([2, 20, 21, 22, 2, 24, 25, 26]), u16x8::new([2, 12, 13, 14, 15, 16, 2, 18]), u16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [u16x8; 4] = transmute(vld4q_lane_u16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35936,8 +35936,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_u32() { let a: [u32; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [u32x4; 4] = [u32x4::new(0, 2, 2, 2), u32x4::new(2, 16, 2, 18), u32x4::new(2, 20, 21, 22), u32x4::new(2, 24, 25, 26)]; - let e: [u32x4; 4] = [u32x4::new(1, 2, 2, 2), u32x4::new(2, 16, 2, 18), u32x4::new(2, 20, 21, 22), u32x4::new(2, 24, 25, 26)]; + let b: [u32x4; 4] = [u32x4::new([0, 2, 2, 2]), u32x4::new([2, 16, 2, 18]), u32x4::new([2, 20, 21, 22]), u32x4::new([2, 24, 25, 26])]; + let e: [u32x4; 4] = [u32x4::new([1, 2, 2, 2]), u32x4::new([2, 16, 2, 18]), u32x4::new([2, 20, 21, 22]), u32x4::new([2, 24, 25, 26])]; let r: [u32x4; 4] = transmute(vld4q_lane_u32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35945,8 +35945,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_p8() { let a: [u8; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i8x8; 4] = [i8x8::new(0, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(11, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i8x8; 4] = [i8x8::new(1, 2, 2, 2, 2, 16, 2, 18), i8x8::new(2, 20, 21, 22, 2, 24, 25, 26), i8x8::new(2, 12, 13, 14, 15, 16, 2, 18), i8x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i8x8; 4] = [i8x8::new([0, 2, 2, 2, 2, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i8x8::new([11, 12, 13, 14, 15, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i8x8; 4] = [i8x8::new([1, 2, 2, 2, 2, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i8x8::new([2, 12, 13, 14, 15, 16, 2, 18]), i8x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i8x8; 4] = 
transmute(vld4_lane_p8::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35954,8 +35954,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_p16() { let a: [u16; 17] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x4; 4] = [i16x4::new(0, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)]; - let e: [i16x4; 4] = [i16x4::new(1, 2, 2, 2), i16x4::new(2, 16, 2, 18), i16x4::new(2, 20, 21, 22), i16x4::new(2, 24, 25, 26)]; + let b: [i16x4; 4] = [i16x4::new([0, 2, 2, 2]), i16x4::new([2, 16, 2, 18]), i16x4::new([2, 20, 21, 22]), i16x4::new([2, 24, 25, 26])]; + let e: [i16x4; 4] = [i16x4::new([1, 2, 2, 2]), i16x4::new([2, 16, 2, 18]), i16x4::new([2, 20, 21, 22]), i16x4::new([2, 24, 25, 26])]; let r: [i16x4; 4] = transmute(vld4_lane_p16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35963,8 +35963,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_p16() { let a: [u16; 33] = [0, 1, 2, 2, 2, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8]; - let b: [i16x8; 4] = [i16x8::new(0, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(11, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; - let e: [i16x8; 4] = [i16x8::new(1, 2, 2, 2, 2, 16, 2, 18), i16x8::new(2, 20, 21, 22, 2, 24, 25, 26), i16x8::new(2, 12, 13, 14, 15, 16, 2, 18), i16x8::new(2, 20, 21, 22, 23, 24, 25, 26)]; + let b: [i16x8; 4] = [i16x8::new([0, 2, 2, 2, 2, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i16x8::new([11, 12, 13, 14, 15, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; + let e: [i16x8; 4] = [i16x8::new([1, 2, 2, 2, 2, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 2, 24, 25, 26]), i16x8::new([2, 12, 13, 14, 15, 16, 2, 18]), i16x8::new([2, 20, 21, 22, 23, 24, 25, 26])]; let r: [i16x8; 4] = transmute(vld4q_lane_p16::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35972,8 +35972,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4_lane_f32() { let a: [f32; 9] = [0., 1., 2., 2., 2., 5., 6., 7., 8.]; - let b: [f32x2; 4] = [f32x2::new(0., 2.), f32x2::new(2., 2.), f32x2::new(2., 16.), f32x2::new(2., 18.)]; - let e: [f32x2; 4] = [f32x2::new(1., 2.), f32x2::new(2., 2.), f32x2::new(2., 16.), f32x2::new(2., 18.)]; + let b: [f32x2; 4] = [f32x2::new([0., 2.]), f32x2::new([2., 2.]), f32x2::new([2., 16.]), f32x2::new([2., 18.])]; + let e: [f32x2; 4] = [f32x2::new([1., 2.]), f32x2::new([2., 2.]), f32x2::new([2., 16.]), f32x2::new([2., 18.])]; let r: [f32x2; 4] = transmute(vld4_lane_f32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } @@ -35981,8 +35981,8 @@ mod test { #[simd_test(enable = "neon")] unsafe fn test_vld4q_lane_f32() { let a: [f32; 17] = [0., 1., 2., 2., 2., 5., 6., 7., 8., 5., 6., 7., 8., 1., 4., 3., 5.]; - let b: [f32x4; 4] = [f32x4::new(0., 2., 2., 2.), f32x4::new(2., 16., 2., 18.), f32x4::new(5., 6., 7., 8.), f32x4::new(1., 4., 3., 5.)]; - let e: [f32x4; 4] = [f32x4::new(1., 2., 2., 2.), f32x4::new(2., 16., 2., 18.), f32x4::new(2., 6., 7., 8.), f32x4::new(2., 4., 3., 5.)]; + let b: [f32x4; 4] = [f32x4::new([0., 2., 2., 2.]), f32x4::new([2., 16., 2., 18.]), f32x4::new([5., 6., 7., 8.]), f32x4::new([1., 4., 3., 5.])]; + let e: [f32x4; 4] = [f32x4::new([1., 2., 2., 2.]), f32x4::new([2., 16., 2., 18.]), f32x4::new([2., 6., 7., 8.]), f32x4::new([2., 4., 3., 5.])]; let r: [f32x4; 4] = transmute(vld4q_lane_f32::<0>(a[1..].as_ptr(), transmute(b))); assert_eq!(r, e); } diff --git 
a/crates/stdarch-gen/src/main.rs b/crates/stdarch-gen/src/main.rs index 652aee88c8..f232dbe796 100644 --- a/crates/stdarch-gen/src/main.rs +++ b/crates/stdarch-gen/src/main.rs @@ -1662,14 +1662,14 @@ fn gen_load_test( } let sub_len = type_len / type_sub_len(out_t); if type_to_global_type(out_t) != "f64" { - let mut sub_output = format!("{}::new(", type_to_global_type(out_t)); + let mut sub_output = format!("{}::new([", type_to_global_type(out_t)); for j in 0..sub_len { if j != 0 { sub_output.push_str(", "); } sub_output.push_str(&v[i * sub_len + j]); } - sub_output.push_str(")"); + sub_output.push_str("])"); output.push_str(&sub_output); } else { output.push_str(&v[i]);
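
Note (not part of the patch): the `gen_load_test` change above is the reason every regenerated NEON test hunk in this file now wraps its lane values in an array literal, e.g. the `test_vld4_s16` expectation becomes

    let e: [i16x4; 4] = [i16x4::new([1, 2, 2, 6]), i16x4::new([2, 6, 6, 8]), i16x4::new([2, 6, 6, 8]), i16x4::new([6, 8, 8, 16])];

instead of the old tuple-style `i16x4::new(1, 2, 2, 6)` calls. A minimal sketch of the string the updated generator builds for one non-f64 sub-vector, with `ty` and `elems` as hypothetical stand-ins for the generator's type name and per-lane value strings:

    // Sketch only: mirrors the format!/push_str sequence in gen_load_test
    // after this patch; names are illustrative, not the generator's own.
    fn sub_output(ty: &str, elems: &[&str]) -> String {
        let mut s = format!("{}::new([", ty); // was "{}::new("
        for (j, e) in elems.iter().enumerate() {
            if j != 0 {
                s.push_str(", ");
            }
            s.push_str(e);
        }
        s.push_str("])"); // was ")"
        s
    }

With `ty = "i16x4"` and `elems = ["1", "2", "2", "6"]` this yields `i16x4::new([1, 2, 2, 6])`, matching the regenerated hunks.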