pub struct V3 {}
AVX instruction set.
Notable additions over V2 include:
- Instructions operating on 256-bit SIMD vectors.
- Shift functions with a separate shift per lane, such as V3::shl_dyn_u32x4.
- Fused multiply-accumulate instructions, such as V3::mul_add_f32x4.
Fields
- sse: Sse
- sse2: Sse2
- fxsr: Fxsr
- sse3: Sse3
- ssse3: Ssse3
- sse4_1: Sse4_1
- sse4_2: Sse4_2
- popcnt: Popcnt
- avx: Avx
- avx2: Avx2
- bmi1: Bmi1
- bmi2: Bmi2
- fma: Fma
- lzcnt: Lzcnt
Implementations
Source§impl V3
AVX instruction set.
impl V3
AVX instruction set.
Notable additions over V2 include:
- Instructions operating on 256-bit SIMD vectors.
- Shift functions with a separate shift per lane, such as V3::shl_dyn_u32x4.
- Fused multiply-accumulate instructions, such as V3::mul_add_f32x4.
Sourcepub unsafe fn new_unchecked() -> Self
pub unsafe fn new_unchecked() -> Self
Returns a SIMD token type without checking if the required CPU features for this type are available.
§Safety
- the required CPU features must be available.
Sourcepub fn try_new() -> Option<Self>
pub fn try_new() -> Option<Self>
Returns a SIMD token type if the required CPU features for this type are
available, otherwise returns None.
Sourcepub fn is_available() -> bool
pub fn is_available() -> bool
Returns true if the required CPU features for this type are available,
otherwise returns false.
Sourcepub fn vectorize<F: NullaryFnOnce>(self, f: F) -> F::Output
pub fn vectorize<F: NullaryFnOnce>(self, f: F) -> F::Output
Vectorizes the given function as if the CPU features for this type were applied to it.
§Note
For the vectorization to work properly, the given function must be inlined.
Consider marking it as #[inline(always)].
Source§impl V3
impl V3
Sourcepub fn and_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn and_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Returns a & b for each bit in a and b.
Sourcepub fn and_m16x16(self, a: m16x16, b: m16x16) -> m16x16
pub fn and_m16x16(self, a: m16x16, b: m16x16) -> m16x16
Returns a & b for each bit in a and b.
Sourcepub fn and_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn and_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Returns a & b for each bit in a and b.
Sourcepub fn andnot_f32x8(self, a: f32x8, b: f32x8) -> f32x8
pub fn andnot_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_f64x4(self, a: f64x4, b: f64x4) -> f64x4
pub fn andnot_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn andnot_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn andnot_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_i64x4(self, a: i64x4, b: i64x4) -> i64x4
pub fn andnot_i64x4(self, a: i64x4, b: i64x4) -> i64x4
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_i8x32(self, a: i8x32, b: i8x32) -> i8x32
pub fn andnot_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_m16x16(self, a: m16x16, b: m16x16) -> m16x16
pub fn andnot_m16x16(self, a: m16x16, b: m16x16) -> m16x16
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_m32x8(self, a: m32x8, b: m32x8) -> m32x8
pub fn andnot_m32x8(self, a: m32x8, b: m32x8) -> m32x8
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_m64x4(self, a: m64x4, b: m64x4) -> m64x4
pub fn andnot_m64x4(self, a: m64x4, b: m64x4) -> m64x4
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_m8x32(self, a: m8x32, b: m8x32) -> m8x32
pub fn andnot_m8x32(self, a: m8x32, b: m8x32) -> m8x32
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn andnot_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_u32x8(self, a: u32x8, b: u32x8) -> u32x8
pub fn andnot_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_u64x4(self, a: u64x4, b: u64x4) -> u64x4
pub fn andnot_u64x4(self, a: u64x4, b: u64x4) -> u64x4
Returns !a & b for each bit in a and b.
Sourcepub fn andnot_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn andnot_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Returns !a & b for each bit in a and b.
Sourcepub fn apply_sign_i16x16(self, sign: i16x16, a: i16x16) -> i16x16
pub fn apply_sign_i16x16(self, sign: i16x16, a: i16x16) -> i16x16
Applies the sign of each element of sign to the corresponding lane in a.
- If sign is zero, the corresponding element is zeroed.
- If sign is positive, the corresponding element is returned as is.
- If sign is negative, the corresponding element is negated.
Sourcepub fn apply_sign_i32x8(self, sign: i32x8, a: i32x8) -> i32x8
pub fn apply_sign_i32x8(self, sign: i32x8, a: i32x8) -> i32x8
Applies the sign of each element of sign to the corresponding lane in a.
- If sign is zero, the corresponding element is zeroed.
- If sign is positive, the corresponding element is returned as is.
- If sign is negative, the corresponding element is negated.
Sourcepub fn apply_sign_i8x32(self, sign: i8x32, a: i8x32) -> i8x32
pub fn apply_sign_i8x32(self, sign: i8x32, a: i8x32) -> i8x32
Applies the sign of each element of sign to the corresponding lane in a.
- If sign is zero, the corresponding element is zeroed.
- If sign is positive, the corresponding element is returned as is.
- If sign is negative, the corresponding element is negated.
Sourcepub fn approx_reciprocal_f32x8(self, a: f32x8) -> f32x8
pub fn approx_reciprocal_f32x8(self, a: f32x8) -> f32x8
Computes the approximate reciprocal of the elements of each lane of a.
Sourcepub fn approx_reciprocal_sqrt_f32x8(self, a: f32x8) -> f32x8
pub fn approx_reciprocal_sqrt_f32x8(self, a: f32x8) -> f32x8
Computes the approximate reciprocal of the square roots of the elements of each lane of a.
Sourcepub fn average_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn average_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Computes average(a, b) for each lane of a and b.
Sourcepub fn average_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn average_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Computes average(a, b) for each lane of a and b.
Sourcepub fn ceil_f32x8(self, a: f32x8) -> f32x8
pub fn ceil_f32x8(self, a: f32x8) -> f32x8
Returns ceil(a) for each lane of a, rounding towards positive infinity.
Sourcepub fn ceil_f64x4(self, a: f64x4) -> f64x4
pub fn ceil_f64x4(self, a: f64x4) -> f64x4
Returns ceil(a) for each lane of a, rounding towards positive infinity.
Sourcepub fn cmp_eq_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_eq_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_eq_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_i16x16(self, a: i16x16, b: i16x16) -> m16x16
pub fn cmp_eq_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_i32x8(self, a: i32x8, b: i32x8) -> m32x8
pub fn cmp_eq_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_i64x4(self, a: i64x4, b: i64x4) -> m64x4
pub fn cmp_eq_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_i8x32(self, a: i8x32, b: i8x32) -> m8x32
pub fn cmp_eq_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_u16x16(self, a: u16x16, b: u16x16) -> m16x16
pub fn cmp_eq_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_u32x8(self, a: u32x8, b: u32x8) -> m32x8
pub fn cmp_eq_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_u64x4(self, a: u64x4, b: u64x4) -> m64x4
pub fn cmp_eq_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_eq_u8x32(self, a: u8x32, b: u8x32) -> m8x32
pub fn cmp_eq_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for equality.
Sourcepub fn cmp_ge_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_ge_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_ge_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_i16x16(self, a: i16x16, b: i16x16) -> m16x16
pub fn cmp_ge_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_i32x8(self, a: i32x8, b: i32x8) -> m32x8
pub fn cmp_ge_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_i64x4(self, a: i64x4, b: i64x4) -> m64x4
pub fn cmp_ge_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_i8x32(self, a: i8x32, b: i8x32) -> m8x32
pub fn cmp_ge_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_u16x16(self, a: u16x16, b: u16x16) -> m16x16
pub fn cmp_ge_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_u32x8(self, a: u32x8, b: u32x8) -> m32x8
pub fn cmp_ge_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_u64x4(self, a: u64x4, b: u64x4) -> m64x4
pub fn cmp_ge_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_ge_u8x32(self, a: u8x32, b: u8x32) -> m8x32
pub fn cmp_ge_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for greater-than-or-equal-to.
Sourcepub fn cmp_gt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_gt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_gt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_i16x16(self, a: i16x16, b: i16x16) -> m16x16
pub fn cmp_gt_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_i32x8(self, a: i32x8, b: i32x8) -> m32x8
pub fn cmp_gt_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_i64x4(self, a: i64x4, b: i64x4) -> m64x4
pub fn cmp_gt_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_i8x32(self, a: i8x32, b: i8x32) -> m8x32
pub fn cmp_gt_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_u16x16(self, a: u16x16, b: u16x16) -> m16x16
pub fn cmp_gt_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_u32x8(self, a: u32x8, b: u32x8) -> m32x8
pub fn cmp_gt_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_u64x4(self, a: u64x4, b: u64x4) -> m64x4
pub fn cmp_gt_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_gt_u8x32(self, a: u8x32, b: u8x32) -> m8x32
pub fn cmp_gt_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for greater-than.
Sourcepub fn cmp_le_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_le_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_le_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_i16x16(self, a: i16x16, b: i16x16) -> m16x16
pub fn cmp_le_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_i32x8(self, a: i32x8, b: i32x8) -> m32x8
pub fn cmp_le_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_i64x4(self, a: i64x4, b: i64x4) -> m64x4
pub fn cmp_le_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_i8x32(self, a: i8x32, b: i8x32) -> m8x32
pub fn cmp_le_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_u16x16(self, a: u16x16, b: u16x16) -> m16x16
pub fn cmp_le_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_u32x8(self, a: u32x8, b: u32x8) -> m32x8
pub fn cmp_le_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_u64x4(self, a: u64x4, b: u64x4) -> m64x4
pub fn cmp_le_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_le_u8x32(self, a: u8x32, b: u8x32) -> m8x32
pub fn cmp_le_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for less-than-or-equal-to.
Sourcepub fn cmp_lt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_lt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_lt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_i16x16(self, a: i16x16, b: i16x16) -> m16x16
pub fn cmp_lt_i16x16(self, a: i16x16, b: i16x16) -> m16x16
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_i32x8(self, a: i32x8, b: i32x8) -> m32x8
pub fn cmp_lt_i32x8(self, a: i32x8, b: i32x8) -> m32x8
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_i64x4(self, a: i64x4, b: i64x4) -> m64x4
pub fn cmp_lt_i64x4(self, a: i64x4, b: i64x4) -> m64x4
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_i8x32(self, a: i8x32, b: i8x32) -> m8x32
pub fn cmp_lt_i8x32(self, a: i8x32, b: i8x32) -> m8x32
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_u16x16(self, a: u16x16, b: u16x16) -> m16x16
pub fn cmp_lt_u16x16(self, a: u16x16, b: u16x16) -> m16x16
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_u32x8(self, a: u32x8, b: u32x8) -> m32x8
pub fn cmp_lt_u32x8(self, a: u32x8, b: u32x8) -> m32x8
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_u64x4(self, a: u64x4, b: u64x4) -> m64x4
pub fn cmp_lt_u64x4(self, a: u64x4, b: u64x4) -> m64x4
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_lt_u8x32(self, a: u8x32, b: u8x32) -> m8x32
pub fn cmp_lt_u8x32(self, a: u8x32, b: u8x32) -> m8x32
Compares the elements in each lane of a and b for less-than.
Sourcepub fn cmp_not_eq_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_not_eq_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for inequality.
Sourcepub fn cmp_not_eq_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_not_eq_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for inequality.
Sourcepub fn cmp_not_ge_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_not_ge_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-greater-than-or-equal.
Sourcepub fn cmp_not_ge_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_not_ge_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-greater-than-or-equal.
Sourcepub fn cmp_not_gt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_not_gt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-greater-than.
Sourcepub fn cmp_not_gt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_not_gt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-greater-than.
Sourcepub fn cmp_not_le_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_not_le_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-less-than-or-equal.
Sourcepub fn cmp_not_le_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_not_le_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-less-than-or-equal.
Sourcepub fn cmp_not_lt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
pub fn cmp_not_lt_f32x8(self, a: f32x8, b: f32x8) -> m32x8
Compares the elements in each lane of a and b for not-less-than.
Sourcepub fn cmp_not_lt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
pub fn cmp_not_lt_f64x4(self, a: f64x4, b: f64x4) -> m64x4
Compares the elements in each lane of a and b for not-less-than.
Sourcepub fn convert_f32x4_to_f64x4(self, a: f32x4) -> f64x4
pub fn convert_f32x4_to_f64x4(self, a: f32x4) -> f64x4
Converts a f32x4 to f64x4, elementwise.
Sourcepub fn convert_f32x8_to_i32x8(self, a: f32x8) -> i32x8
pub fn convert_f32x8_to_i32x8(self, a: f32x8) -> i32x8
Converts a f32x8 to i32x8, elementwise.
Sourcepub fn convert_f64x4_to_f32x4(self, a: f64x4) -> f32x4
pub fn convert_f64x4_to_f32x4(self, a: f64x4) -> f32x4
Converts a f64x4 to f32x4, elementwise.
Sourcepub fn convert_f64x4_to_i32x4(self, a: f64x4) -> i32x4
pub fn convert_f64x4_to_i32x4(self, a: f64x4) -> i32x4
Converts a f64x4 to i32x4, elementwise.
Sourcepub fn convert_i16x16_to_u16x16(self, a: i16x16) -> u16x16
pub fn convert_i16x16_to_u16x16(self, a: i16x16) -> u16x16
Converts a i16x16 to u16x16, elementwise.
Sourcepub fn convert_i16x8_to_i32x8(self, a: i16x8) -> i32x8
pub fn convert_i16x8_to_i32x8(self, a: i16x8) -> i32x8
Converts a i16x8 to i32x8, elementwise.
Sourcepub fn convert_i16x8_to_i64x4(self, a: i16x8) -> i64x4
pub fn convert_i16x8_to_i64x4(self, a: i16x8) -> i64x4
Converts a i16x8 to i64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_i16x8_to_u32x8(self, a: i16x8) -> u32x8
pub fn convert_i16x8_to_u32x8(self, a: i16x8) -> u32x8
Converts a i16x8 to u32x8, elementwise.
Sourcepub fn convert_i16x8_to_u64x4(self, a: i16x8) -> u64x4
pub fn convert_i16x8_to_u64x4(self, a: i16x8) -> u64x4
Converts a i16x8 to u64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_i32x4_to_f64x4(self, a: i32x4) -> f64x4
pub fn convert_i32x4_to_f64x4(self, a: i32x4) -> f64x4
Converts a i32x4 to f64x4, elementwise.
Sourcepub fn convert_i32x4_to_i64x4(self, a: i32x4) -> i64x4
pub fn convert_i32x4_to_i64x4(self, a: i32x4) -> i64x4
Converts a i32x4 to i64x4, elementwise.
Sourcepub fn convert_i32x4_to_u64x4(self, a: i32x4) -> u64x4
pub fn convert_i32x4_to_u64x4(self, a: i32x4) -> u64x4
Converts a i32x4 to u64x4, elementwise.
Sourcepub fn convert_i32x8_to_f32x8(self, a: i32x8) -> f32x8
pub fn convert_i32x8_to_f32x8(self, a: i32x8) -> f32x8
Converts a i32x8 to f32x8, elementwise.
Sourcepub fn convert_i32x8_to_u32x8(self, a: i32x8) -> u32x8
pub fn convert_i32x8_to_u32x8(self, a: i32x8) -> u32x8
Converts a i32x8 to u32x8, elementwise.
Sourcepub fn convert_i8x16_to_i16x16(self, a: i8x16) -> i16x16
pub fn convert_i8x16_to_i16x16(self, a: i8x16) -> i16x16
Converts a i8x16 to i16x16, elementwise.
Sourcepub fn convert_i8x16_to_i32x8(self, a: i8x16) -> i32x8
pub fn convert_i8x16_to_i32x8(self, a: i8x16) -> i32x8
Converts a i8x16 to i32x8, elementwise, while truncating the extra elements.
Sourcepub fn convert_i8x16_to_i64x4(self, a: i8x16) -> i64x4
pub fn convert_i8x16_to_i64x4(self, a: i8x16) -> i64x4
Converts a i8x16 to i64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_i8x16_to_u16x16(self, a: i8x16) -> u16x16
pub fn convert_i8x16_to_u16x16(self, a: i8x16) -> u16x16
Converts a i8x16 to u16x16, elementwise.
Sourcepub fn convert_i8x16_to_u32x8(self, a: i8x16) -> u32x8
pub fn convert_i8x16_to_u32x8(self, a: i8x16) -> u32x8
Converts a i8x16 to u32x8, elementwise, while truncating the extra elements.
Sourcepub fn convert_i8x16_to_u64x4(self, a: i8x16) -> u64x4
pub fn convert_i8x16_to_u64x4(self, a: i8x16) -> u64x4
Converts a i8x16 to u64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_i8x32_to_u8x32(self, a: i8x32) -> u8x32
pub fn convert_i8x32_to_u8x32(self, a: i8x32) -> u8x32
Converts a i8x32 to u8x32, elementwise.
Sourcepub fn convert_u16x16_to_i16x16(self, a: u16x16) -> i16x16
pub fn convert_u16x16_to_i16x16(self, a: u16x16) -> i16x16
Converts a u16x16 to i16x16, elementwise.
Sourcepub fn convert_u16x8_to_i32x8(self, a: u16x8) -> i32x8
pub fn convert_u16x8_to_i32x8(self, a: u16x8) -> i32x8
Converts a u16x8 to i32x8, elementwise.
Sourcepub fn convert_u16x8_to_i64x4(self, a: u16x8) -> i64x4
pub fn convert_u16x8_to_i64x4(self, a: u16x8) -> i64x4
Converts a u16x8 to i64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_u16x8_to_u32x8(self, a: u16x8) -> u32x8
pub fn convert_u16x8_to_u32x8(self, a: u16x8) -> u32x8
Converts a u16x8 to u32x8, elementwise.
Sourcepub fn convert_u16x8_to_u64x4(self, a: u16x8) -> u64x4
pub fn convert_u16x8_to_u64x4(self, a: u16x8) -> u64x4
Converts a u16x8 to u64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_u32x4_to_i64x4(self, a: u32x4) -> i64x4
pub fn convert_u32x4_to_i64x4(self, a: u32x4) -> i64x4
Converts a u32x4 to i64x4, elementwise.
Sourcepub fn convert_u32x4_to_u64x4(self, a: u32x4) -> u64x4
pub fn convert_u32x4_to_u64x4(self, a: u32x4) -> u64x4
Converts a u32x4 to u64x4, elementwise.
Sourcepub fn convert_u32x8_to_i32x8(self, a: u32x8) -> i32x8
pub fn convert_u32x8_to_i32x8(self, a: u32x8) -> i32x8
Converts a u32x8 to i32x8, elementwise.
Sourcepub fn convert_u8x16_to_i16x16(self, a: u8x16) -> i16x16
pub fn convert_u8x16_to_i16x16(self, a: u8x16) -> i16x16
Converts a u8x16 to i16x16, elementwise.
Sourcepub fn convert_u8x16_to_i32x8(self, a: u8x16) -> i32x8
pub fn convert_u8x16_to_i32x8(self, a: u8x16) -> i32x8
Converts a u8x16 to i32x8, elementwise, while truncating the extra elements.
Sourcepub fn convert_u8x16_to_i64x4(self, a: u8x16) -> i64x4
pub fn convert_u8x16_to_i64x4(self, a: u8x16) -> i64x4
Converts a u8x16 to i64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_u8x16_to_u16x16(self, a: u8x16) -> u16x16
pub fn convert_u8x16_to_u16x16(self, a: u8x16) -> u16x16
Converts a u8x16 to u16x16, elementwise.
Sourcepub fn convert_u8x16_to_u32x8(self, a: u8x16) -> u32x8
pub fn convert_u8x16_to_u32x8(self, a: u8x16) -> u32x8
Converts a u8x16 to u32x8, elementwise, while truncating the extra elements.
Sourcepub fn convert_u8x16_to_u64x4(self, a: u8x16) -> u64x4
pub fn convert_u8x16_to_u64x4(self, a: u8x16) -> u64x4
Converts a u8x16 to u64x4, elementwise, while truncating the extra elements.
Sourcepub fn convert_u8x32_to_i8x32(self, a: u8x32) -> i8x32
pub fn convert_u8x32_to_i8x32(self, a: u8x32) -> i8x32
Converts a u8x32 to i8x32, elementwise.
Sourcepub fn div_f32x8(self, a: f32x8, b: f32x8) -> f32x8
pub fn div_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Divides the elements of each lane of a and b.
Sourcepub fn div_f64x4(self, a: f64x4, b: f64x4) -> f64x4
pub fn div_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Divides the elements of each lane of a and b.
Sourcepub fn floor_f32x8(self, a: f32x8) -> f32x8
pub fn floor_f32x8(self, a: f32x8) -> f32x8
Rounds the elements of each lane of a to the nearest integer towards negative infinity.
Sourcepub fn floor_f64x4(self, a: f64x4) -> f64x4
pub fn floor_f64x4(self, a: f64x4) -> f64x4
Rounds the elements of each lane of a to the nearest integer towards negative infinity.
Sourcepub fn horizontal_add_pack_f32x8(self, a: f32x8, b: f32x8) -> f32x8
pub fn horizontal_add_pack_f32x8(self, a: f32x8, b: f32x8) -> f32x8
See _mm256_hadd_ps.
Sourcepub fn horizontal_add_pack_f64x4(self, a: f64x4, b: f64x4) -> f64x4
pub fn horizontal_add_pack_f64x4(self, a: f64x4, b: f64x4) -> f64x4
See _mm256_hadd_pd.
Sourcepub fn horizontal_add_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn horizontal_add_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
See _mm256_hadd_epi16.
Sourcepub fn horizontal_add_pack_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn horizontal_add_pack_i32x8(self, a: i32x8, b: i32x8) -> i32x8
See _mm256_hadd_epi32.
Sourcepub fn horizontal_saturating_add_pack_i16x16(
self,
a: i16x16,
b: i16x16,
) -> i16x16
pub fn horizontal_saturating_add_pack_i16x16( self, a: i16x16, b: i16x16, ) -> i16x16
See _mm256_hadds_epi16.
Sourcepub fn horizontal_saturating_sub_pack_i16x16(
self,
a: i16x16,
b: i16x16,
) -> i16x16
pub fn horizontal_saturating_sub_pack_i16x16( self, a: i16x16, b: i16x16, ) -> i16x16
See _mm256_hsubs_epi16.
Sourcepub fn horizontal_sub_pack_f32x8(self, a: f32x8, b: f32x8) -> f32x8
pub fn horizontal_sub_pack_f32x8(self, a: f32x8, b: f32x8) -> f32x8
See _mm256_hsub_ps.
Sourcepub fn horizontal_sub_pack_f64x4(self, a: f64x4, b: f64x4) -> f64x4
pub fn horizontal_sub_pack_f64x4(self, a: f64x4, b: f64x4) -> f64x4
See _mm256_hsub_pd.
Sourcepub fn horizontal_sub_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn horizontal_sub_pack_i16x16(self, a: i16x16, b: i16x16) -> i16x16
See _mm256_hsub_epi16.
Sourcepub fn horizontal_sub_pack_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn horizontal_sub_pack_i32x8(self, a: i32x8, b: i32x8) -> i32x8
See _mm256_hsub_epi32.
Sourcepub fn is_nan_f32x8(self, a: f32x8) -> m32x8
pub fn is_nan_f32x8(self, a: f32x8) -> m32x8
Checks if the elements in each lane of a are NaN.
Sourcepub fn is_nan_f64x4(self, a: f64x4) -> m64x4
pub fn is_nan_f64x4(self, a: f64x4) -> m64x4
Checks if the elements in each lane of a are NaN.
Sourcepub fn is_not_nan_f32x8(self, a: f32x8) -> m32x8
pub fn is_not_nan_f32x8(self, a: f32x8) -> m32x8
Checks if the elements in each lane of a are not NaN.
Sourcepub fn is_not_nan_f64x4(self, a: f64x4) -> m64x4
pub fn is_not_nan_f64x4(self, a: f64x4) -> m64x4
Checks if the elements in each lane of a are not NaN.
Sourcepub fn max_f32x8(self, a: f32x8, b: f32x8) -> f32x8
pub fn max_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Computes max(a, b) for each lane in a and b.
Sourcepub fn max_f64x4(self, a: f64x4, b: f64x4) -> f64x4
pub fn max_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Computes max(a, b) for each lane in a and b.
Sourcepub fn max_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn max_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Computes max(a, b) for each lane in a and b.
Sourcepub fn max_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn max_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Computes max(a, b) for each lane in a and b.
Sourcepub fn max_i8x32(self, a: i8x32, b: i8x32) -> i8x32
pub fn max_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Computes max(a, b) for each lane in a and b.
Sourcepub fn max_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn max_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Computes max(a, b) for each lane in a and b.
Sourcepub fn max_u32x8(self, a: u32x8, b: u32x8) -> u32x8
pub fn max_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Computes max(a, b) for each lane in a and b.
Sourcepub fn max_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn max_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Computes max(a, b) for each lane in a and b.
Sourcepub fn min_f32x8(self, a: f32x8, b: f32x8) -> f32x8
pub fn min_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Computes min(a, b) for each lane in a and b.
Sourcepub fn min_f64x4(self, a: f64x4, b: f64x4) -> f64x4
pub fn min_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Computes min(a, b) for each lane in a and b.
Sourcepub fn min_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn min_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Computes min(a, b) for each lane in a and b.
Sourcepub fn min_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn min_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Computes min(a, b) for each lane in a and b.
Sourcepub fn min_i8x32(self, a: i8x32, b: i8x32) -> i8x32
pub fn min_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Computes min(a, b) for each lane in a and b.
Sourcepub fn min_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn min_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Computes min(a, b) for each lane in a and b.
Sourcepub fn min_u32x8(self, a: u32x8, b: u32x8) -> u32x8
pub fn min_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Computes min(a, b) for each lane in a and b.
Sourcepub fn min_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn min_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Computes min(a, b) for each lane in a and b.
Sourcepub fn mul_add_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
pub fn mul_add_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and adds the results to each lane of
c.
Sourcepub fn mul_add_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
pub fn mul_add_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and adds the results to each lane of
c.
Sourcepub fn mul_add_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
pub fn mul_add_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and adds the results to each lane of
c.
Sourcepub fn mul_add_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
pub fn mul_add_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and adds the results to each lane of
c.
Sourcepub fn mul_addsub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
pub fn mul_addsub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and alternatively adds/subtracts ‘c’
to/from the results.
Sourcepub fn mul_addsub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
pub fn mul_addsub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and alternatively adds/subtracts ‘c’
to/from the results.
Sourcepub fn mul_addsub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
pub fn mul_addsub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and alternatively adds/subtracts ‘c’
to/from the results.
Sourcepub fn mul_addsub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
pub fn mul_addsub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and alternatively adds/subtracts ‘c’
to/from the results.
Sourcepub fn mul_sub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
pub fn mul_sub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the results.
Sourcepub fn mul_sub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
pub fn mul_sub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the results.
Sourcepub fn mul_sub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
pub fn mul_sub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the results.
Sourcepub fn mul_sub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
pub fn mul_sub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the results.
Sourcepub fn mul_subadd_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
pub fn mul_subadd_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c
to/from the results.
Sourcepub fn mul_subadd_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
pub fn mul_subadd_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c
to/from the results.
Sourcepub fn mul_subadd_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
pub fn mul_subadd_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c
to/from the results.
Sourcepub fn mul_subadd_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
pub fn mul_subadd_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and alternately subtracts/adds c
to/from the results.
Sourcepub fn multiply_saturating_add_adjacent_i8x32(
self,
a: i8x32,
b: i8x32,
) -> i16x16
pub fn multiply_saturating_add_adjacent_i8x32( self, a: i8x32, b: i8x32, ) -> i16x16
See _mm256_maddubs_epi16.
Sourcepub fn multiply_wrapping_add_adjacent_i16x16(
self,
a: i16x16,
b: i16x16,
) -> i32x8
pub fn multiply_wrapping_add_adjacent_i16x16( self, a: i16x16, b: i16x16, ) -> i32x8
See _mm256_madd_epi16.
Sourcepub fn multisum_of_absolute_differences_u8x32<const OFFSETS: i32>(
self,
a: u8x32,
b: u8x32,
) -> u16x16
pub fn multisum_of_absolute_differences_u8x32<const OFFSETS: i32>( self, a: u8x32, b: u8x32, ) -> u16x16
See _mm256_mpsadbw_epu8.
Sourcepub fn negate_mul_add_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
pub fn negate_mul_add_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, negates the results, and adds them to
each lane of c.
Sourcepub fn negate_mul_add_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
pub fn negate_mul_add_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, negates the results, and adds them to
each lane of c.
Sourcepub fn negate_mul_add_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
pub fn negate_mul_add_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, negates the results, and adds them to
each lane of c.
Sourcepub fn negate_mul_add_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
pub fn negate_mul_add_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, negates the results, and adds them to
each lane of c.
Sourcepub fn negate_mul_sub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
pub fn negate_mul_sub_f32x4(self, a: f32x4, b: f32x4, c: f32x4) -> f32x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the negation of the results.
Sourcepub fn negate_mul_sub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
pub fn negate_mul_sub_f32x8(self, a: f32x8, b: f32x8, c: f32x8) -> f32x8
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the negation of the results.
Sourcepub fn negate_mul_sub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
pub fn negate_mul_sub_f64x2(self, a: f64x2, b: f64x2, c: f64x2) -> f64x2
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the negation of the results.
Sourcepub fn negate_mul_sub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
pub fn negate_mul_sub_f64x4(self, a: f64x4, b: f64x4, c: f64x4) -> f64x4
Multiplies the elements in each lane of a and b, and subtracts each lane of c from
the negation of the results.
Sourcepub fn not_i16x16(self, a: i16x16) -> i16x16
pub fn not_i16x16(self, a: i16x16) -> i16x16
Returns !a for each bit in a.
Sourcepub fn not_m16x16(self, a: m16x16) -> m16x16
pub fn not_m16x16(self, a: m16x16) -> m16x16
Returns !a for each bit in a.
Sourcepub fn not_u16x16(self, a: u16x16) -> u16x16
pub fn not_u16x16(self, a: u16x16) -> u16x16
Returns !a for each bit in a.
Sourcepub fn pack_with_signed_saturation_i16x16(self, a: i16x16, b: i16x16) -> i8x32
pub fn pack_with_signed_saturation_i16x16(self, a: i16x16, b: i16x16) -> i8x32
See _mm256_packs_epi16.
Sourcepub fn pack_with_signed_saturation_i32x8(self, a: i32x8, b: i32x8) -> i16x16
pub fn pack_with_signed_saturation_i32x8(self, a: i32x8, b: i32x8) -> i16x16
See _mm256_packs_epi32.
Sourcepub fn pack_with_unsigned_saturation_i16x16(self, a: i16x16, b: i16x16) -> u8x32
pub fn pack_with_unsigned_saturation_i16x16(self, a: i16x16, b: i16x16) -> u8x32
See _mm256_packus_epi16.
Sourcepub fn pack_with_unsigned_saturation_i32x8(self, a: i32x8, b: i32x8) -> u16x16
pub fn pack_with_unsigned_saturation_i32x8(self, a: i32x8, b: i32x8) -> u16x16
See _mm256_packus_epi32.
Sourcepub fn round_f32x8(self, a: f32x8) -> f32x8
pub fn round_f32x8(self, a: f32x8) -> f32x8
Rounds the elements of each lane of a to the nearest integer. If two values are equally
close, the even value is returned.
Sourcepub fn round_f64x4(self, a: f64x4) -> f64x4
pub fn round_f64x4(self, a: f64x4) -> f64x4
Rounds the elements of each lane of a to the nearest integer. If two values are equally
close, the even value is returned.
Sourcepub fn saturating_add_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn saturating_add_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Adds the elements of each lane of a and b, with saturation.
Sourcepub fn saturating_add_i8x32(self, a: i8x32, b: i8x32) -> i8x32
pub fn saturating_add_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Adds the elements of each lane of a and b, with saturation.
Sourcepub fn saturating_add_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn saturating_add_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Adds the elements of each lane of a and b, with saturation.
Sourcepub fn saturating_add_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn saturating_add_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Adds the elements of each lane of a and b, with saturation.
Sourcepub fn saturating_sub_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn saturating_sub_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Subtracts the elements of each lane of a and b, with saturation.
Sourcepub fn saturating_sub_i8x32(self, a: i8x32, b: i8x32) -> i8x32
pub fn saturating_sub_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Subtracts the elements of each lane of a and b, with saturation.
Sourcepub fn saturating_sub_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn saturating_sub_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Subtracts the elements of each lane of a and b, with saturation.
Sourcepub fn saturating_sub_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn saturating_sub_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Subtracts the elements of each lane of a and b, with saturation.
Sourcepub fn select_const_f32x8<const MASK8: i32>(
self,
if_true: f32x8,
if_false: f32x8,
) -> f32x8
pub fn select_const_f32x8<const MASK8: i32>( self, if_true: f32x8, if_false: f32x8, ) -> f32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding
bit in the mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_const_f64x4<const MASK4: i32>(
self,
if_true: f64x4,
if_false: f64x4,
) -> f64x4
pub fn select_const_f64x4<const MASK4: i32>( self, if_true: f64x4, if_false: f64x4, ) -> f64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding
bit in the mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_const_i32x8<const MASK8: i32>(
self,
if_true: i32x8,
if_false: i32x8,
) -> i32x8
pub fn select_const_i32x8<const MASK8: i32>( self, if_true: i32x8, if_false: i32x8, ) -> i32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding
bit in the mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_const_i64x4<const MASK4: i32>(
self,
if_true: i64x4,
if_false: i64x4,
) -> i64x4
pub fn select_const_i64x4<const MASK4: i32>( self, if_true: i64x4, if_false: i64x4, ) -> i64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding
bit in the mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_const_u32x8<const MASK8: i32>(
self,
if_true: u32x8,
if_false: u32x8,
) -> u32x8
pub fn select_const_u32x8<const MASK8: i32>( self, if_true: u32x8, if_false: u32x8, ) -> u32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding
bit in the mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_const_u64x4<const MASK4: i32>(
self,
if_true: u64x4,
if_false: u64x4,
) -> u64x4
pub fn select_const_u64x4<const MASK4: i32>( self, if_true: u64x4, if_false: u64x4, ) -> u64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding
bit in the mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_f32x8(self, mask: m32x8, if_true: f32x8, if_false: f32x8) -> f32x8
pub fn select_f32x8(self, mask: m32x8, if_true: f32x8, if_false: f32x8) -> f32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_f64x4(self, mask: m64x4, if_true: f64x4, if_false: f64x4) -> f64x4
pub fn select_f64x4(self, mask: m64x4, if_true: f64x4, if_false: f64x4) -> f64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_i16x16(
self,
mask: m16x16,
if_true: i16x16,
if_false: i16x16,
) -> i16x16
pub fn select_i16x16( self, mask: m16x16, if_true: i16x16, if_false: i16x16, ) -> i16x16
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_i32x8(self, mask: m32x8, if_true: i32x8, if_false: i32x8) -> i32x8
pub fn select_i32x8(self, mask: m32x8, if_true: i32x8, if_false: i32x8) -> i32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_i64x4(self, mask: m64x4, if_true: i64x4, if_false: i64x4) -> i64x4
pub fn select_i64x4(self, mask: m64x4, if_true: i64x4, if_false: i64x4) -> i64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_i8x32(self, mask: m8x32, if_true: i8x32, if_false: i8x32) -> i8x32
pub fn select_i8x32(self, mask: m8x32, if_true: i8x32, if_false: i8x32) -> i8x32
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_u16x16(
self,
mask: m16x16,
if_true: u16x16,
if_false: u16x16,
) -> u16x16
pub fn select_u16x16( self, mask: m16x16, if_true: u16x16, if_false: u16x16, ) -> u16x16
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_u32x8(self, mask: m32x8, if_true: u32x8, if_false: u32x8) -> u32x8
pub fn select_u32x8(self, mask: m32x8, if_true: u32x8, if_false: u32x8) -> u32x8
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_u64x4(self, mask: m64x4, if_true: u64x4, if_false: u64x4) -> u64x4
pub fn select_u64x4(self, mask: m64x4, if_true: u64x4, if_false: u64x4) -> u64x4
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn select_u8x32(self, mask: m8x32, if_true: u8x32, if_false: u8x32) -> u8x32
pub fn select_u8x32(self, mask: m8x32, if_true: u8x32, if_false: u8x32) -> u8x32
Combines if_true and if_false, selecting elements from if_true if the corresponding
lane in mask is set, otherwise selecting elements from if_false.
Sourcepub fn shl_const_i16x16<const AMOUNT: i32>(self, a: i16x16) -> i16x16
pub fn shl_const_i16x16<const AMOUNT: i32>(self, a: i16x16) -> i16x16
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_const_i32x8<const AMOUNT: i32>(self, a: i32x8) -> i32x8
pub fn shl_const_i32x8<const AMOUNT: i32>(self, a: i32x8) -> i32x8
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_const_i64x4<const AMOUNT: i32>(self, a: i64x4) -> i64x4
pub fn shl_const_i64x4<const AMOUNT: i32>(self, a: i64x4) -> i64x4
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_const_u16x16<const AMOUNT: i32>(self, a: u16x16) -> u16x16
pub fn shl_const_u16x16<const AMOUNT: i32>(self, a: u16x16) -> u16x16
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_const_u32x8<const AMOUNT: i32>(self, a: u32x8) -> u32x8
pub fn shl_const_u32x8<const AMOUNT: i32>(self, a: u32x8) -> u32x8
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_const_u64x4<const AMOUNT: i32>(self, a: u64x4) -> u64x4
pub fn shl_const_u64x4<const AMOUNT: i32>(self, a: u64x4) -> u64x4
Shift the bits of each lane of a to the left by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_i32x4(self, a: i32x4, amount: u32x4) -> i32x4
pub fn shl_dyn_i32x4(self, a: i32x4, amount: u32x4) -> i32x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_i32x8(self, a: i32x8, amount: u32x8) -> i32x8
pub fn shl_dyn_i32x8(self, a: i32x8, amount: u32x8) -> i32x8
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_i64x2(self, a: i64x2, amount: u64x2) -> i64x2
pub fn shl_dyn_i64x2(self, a: i64x2, amount: u64x2) -> i64x2
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_i64x4(self, a: i64x4, amount: u64x4) -> i64x4
pub fn shl_dyn_i64x4(self, a: i64x4, amount: u64x4) -> i64x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_u32x4(self, a: u32x4, amount: u32x4) -> u32x4
pub fn shl_dyn_u32x4(self, a: u32x4, amount: u32x4) -> u32x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_u32x8(self, a: u32x8, amount: u32x8) -> u32x8
pub fn shl_dyn_u32x8(self, a: u32x8, amount: u32x8) -> u32x8
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_u64x2(self, a: u64x2, amount: u64x2) -> u64x2
pub fn shl_dyn_u64x2(self, a: u64x2, amount: u64x2) -> u64x2
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_dyn_u64x4(self, a: u64x4, amount: u64x4) -> u64x4
pub fn shl_dyn_u64x4(self, a: u64x4, amount: u64x4) -> u64x4
Shift the bits of each lane of a to the left by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_i16x16(self, a: i16x16, amount: u64x2) -> i16x16
pub fn shl_i16x16(self, a: i16x16, amount: u64x2) -> i16x16
Shift the bits of each lane of a to the left by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_i32x8(self, a: i32x8, amount: u64x2) -> i32x8
pub fn shl_i32x8(self, a: i32x8, amount: u64x2) -> i32x8
Shift the bits of each lane of a to the left by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_i64x4(self, a: i64x4, amount: u64x2) -> i64x4
pub fn shl_i64x4(self, a: i64x4, amount: u64x2) -> i64x4
Shift the bits of each lane of a to the left by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_u16x16(self, a: u16x16, amount: u64x2) -> u16x16
pub fn shl_u16x16(self, a: u16x16, amount: u64x2) -> u16x16
Shift the bits of each lane of a to the left by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_u32x8(self, a: u32x8, amount: u64x2) -> u32x8
pub fn shl_u32x8(self, a: u32x8, amount: u64x2) -> u32x8
Shift the bits of each lane of a to the left by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shl_u64x4(self, a: u64x4, amount: u64x2) -> u64x4
pub fn shl_u64x4(self, a: u64x4, amount: u64x2) -> u64x4
Shift the bits of each lane of a to the left by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_const_i16x16<const AMOUNT: i32>(self, a: i16x16) -> i16x16
pub fn shr_const_i16x16<const AMOUNT: i32>(self, a: i16x16) -> i16x16
Shift the bits of each lane of a to the right by AMOUNT, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the
sign bit is not set, and to -1 if the sign bit is set.
Sourcepub fn shr_const_i32x8<const AMOUNT: i32>(self, a: i32x8) -> i32x8
pub fn shr_const_i32x8<const AMOUNT: i32>(self, a: i32x8) -> i32x8
Shift the bits of each lane of a to the right by AMOUNT, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the
sign bit is not set, and to -1 if the sign bit is set.
Sourcepub fn shr_const_u16x16<const AMOUNT: i32>(self, a: u16x16) -> u16x16
pub fn shr_const_u16x16<const AMOUNT: i32>(self, a: u16x16) -> u16x16
Shift the bits of each lane of a to the right by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_const_u32x8<const AMOUNT: i32>(self, a: u32x8) -> u32x8
pub fn shr_const_u32x8<const AMOUNT: i32>(self, a: u32x8) -> u32x8
Shift the bits of each lane of a to the right by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_const_u64x4<const AMOUNT: i32>(self, a: u64x4) -> u64x4
pub fn shr_const_u64x4<const AMOUNT: i32>(self, a: u64x4) -> u64x4
Shift the bits of each lane of a to the right by AMOUNT, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_dyn_i32x4(self, a: i32x4, amount: i32x4) -> i32x4
pub fn shr_dyn_i32x4(self, a: i32x4, amount: i32x4) -> i32x4
Shift the bits of each lane of a to the right by the element in the corresponding lane in
amount, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the
sign bit is not set, and to -1 if the sign bit is set.
Sourcepub fn shr_dyn_i32x8(self, a: i32x8, amount: i32x8) -> i32x8
pub fn shr_dyn_i32x8(self, a: i32x8, amount: i32x8) -> i32x8
Shift the bits of each lane of a to the right by the element in the corresponding lane in
amount, while shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the
sign bit is not set, and to -1 if the sign bit is set.
Sourcepub fn shr_dyn_u32x4(self, a: u32x4, amount: u32x4) -> u32x4
pub fn shr_dyn_u32x4(self, a: u32x4, amount: u32x4) -> u32x4
Shift the bits of each lane of a to the right by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_dyn_u32x8(self, a: u32x8, amount: u32x8) -> u32x8
pub fn shr_dyn_u32x8(self, a: u32x8, amount: u32x8) -> u32x8
Shift the bits of each lane of a to the right by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_dyn_u64x2(self, a: u64x2, amount: u64x2) -> u64x2
pub fn shr_dyn_u64x2(self, a: u64x2, amount: u64x2) -> u64x2
Shift the bits of each lane of a to the right by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_dyn_u64x4(self, a: u64x4, amount: u64x4) -> u64x4
pub fn shr_dyn_u64x4(self, a: u64x4, amount: u64x4) -> u64x4
Shift the bits of each lane of a to the right by the element in the corresponding lane in
amount, while shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_i16x16(self, a: i16x16, amount: u64x2) -> i16x16
pub fn shr_i16x16(self, a: i16x16, amount: u64x2) -> i16x16
Shift the bits of each lane of a to the right by the first element in amount, while
shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the
sign bit is not set, and to -1 if the sign bit is set.
Sourcepub fn shr_i32x8(self, a: i32x8, amount: u64x2) -> i32x8
pub fn shr_i32x8(self, a: i32x8, amount: u64x2) -> i32x8
Shift the bits of each lane of a to the right by the first element in amount, while
shifting in sign bits.
Shifting by a value greater than the bit width of the type sets the result to zero if the
sign bit is not set, and to -1 if the sign bit is set.
Sourcepub fn shr_u16x16(self, a: u16x16, amount: u64x2) -> u16x16
pub fn shr_u16x16(self, a: u16x16, amount: u64x2) -> u16x16
Shift the bits of each lane of a to the right by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_u32x8(self, a: u32x8, amount: u64x2) -> u32x8
pub fn shr_u32x8(self, a: u32x8, amount: u64x2) -> u32x8
Shift the bits of each lane of a to the right by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn shr_u64x4(self, a: u64x4, amount: u64x2) -> u64x4
pub fn shr_u64x4(self, a: u64x4, amount: u64x2) -> u64x4
Shift the bits of each lane of a to the right by the first element in amount, while
shifting in zeros.
Shifting by a value greater than the bit width of the type sets the result to zero.
Sourcepub fn splat_f32x8(self, value: f32) -> f32x8
pub fn splat_f32x8(self, value: f32) -> f32x8
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_f64x4(self, value: f64) -> f64x4
pub fn splat_f64x4(self, value: f64) -> f64x4
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_i16x16(self, value: i16) -> i16x16
pub fn splat_i16x16(self, value: i16) -> i16x16
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_i32x8(self, value: i32) -> i32x8
pub fn splat_i32x8(self, value: i32) -> i32x8
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_i64x4(self, value: i64) -> i64x4
pub fn splat_i64x4(self, value: i64) -> i64x4
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_i8x32(self, value: i8) -> i8x32
pub fn splat_i8x32(self, value: i8) -> i8x32
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_m16x16(self, value: m16) -> m16x16
pub fn splat_m16x16(self, value: m16) -> m16x16
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_m32x8(self, value: m32) -> m32x8
pub fn splat_m32x8(self, value: m32) -> m32x8
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_m64x4(self, value: m64) -> m64x4
pub fn splat_m64x4(self, value: m64) -> m64x4
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_m8x32(self, value: m8) -> m8x32
pub fn splat_m8x32(self, value: m8) -> m8x32
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_u16x16(self, value: u16) -> u16x16
pub fn splat_u16x16(self, value: u16) -> u16x16
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_u32x8(self, value: u32) -> u32x8
pub fn splat_u32x8(self, value: u32) -> u32x8
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_u64x4(self, value: u64) -> u64x4
pub fn splat_u64x4(self, value: u64) -> u64x4
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn splat_u8x32(self, value: u8) -> u8x32
pub fn splat_u8x32(self, value: u8) -> u8x32
Returns a SIMD vector with all lanes set to the given value.
Sourcepub fn sqrt_f32x8(self, a: f32x8) -> f32x8
pub fn sqrt_f32x8(self, a: f32x8) -> f32x8
Computes the square roots of the elements of each lane of a.
Sourcepub fn sqrt_f64x4(self, a: f64x4) -> f64x4
pub fn sqrt_f64x4(self, a: f64x4) -> f64x4
Computes the square roots of the elements of each lane of a.
Sourcepub fn subadd_f32x8(self, a: f32x8, b: f32x8) -> f32x8
pub fn subadd_f32x8(self, a: f32x8, b: f32x8) -> f32x8
Alternately subtracts and adds the elements of each lane of a and b.
Sourcepub fn subadd_f64x4(self, a: f64x4, b: f64x4) -> f64x4
pub fn subadd_f64x4(self, a: f64x4, b: f64x4) -> f64x4
Alternately subtracts and adds the elements of each lane of a and b.
Sourcepub fn sum_of_absolute_differences_u8x32(self, a: u8x32, b: u8x32) -> u64x4
pub fn sum_of_absolute_differences_u8x32(self, a: u8x32, b: u8x32) -> u64x4
See _mm256_sad_epu8.
Sourcepub fn truncate_f32x8(self, a: f32x8) -> f32x8
pub fn truncate_f32x8(self, a: f32x8) -> f32x8
Rounds the elements of each lane of a to the nearest integer towards zero.
Sourcepub fn truncate_f64x4(self, a: f64x4) -> f64x4
pub fn truncate_f64x4(self, a: f64x4) -> f64x4
Rounds the elements of each lane of a to the nearest integer towards zero.
Sourcepub fn unsigned_abs_i16x16(self, a: i16x16) -> u16x16
pub fn unsigned_abs_i16x16(self, a: i16x16) -> u16x16
Computes the unsigned absolute value of the elements of each lane of a.
Sourcepub fn unsigned_abs_i32x8(self, a: i32x8) -> u32x8
pub fn unsigned_abs_i32x8(self, a: i32x8) -> u32x8
Computes the unsigned absolute value of the elements of each lane of a.
Sourcepub fn unsigned_abs_i8x32(self, a: i8x32) -> u8x32
pub fn unsigned_abs_i8x32(self, a: i8x32) -> u8x32
Computes the unsigned absolute value of the elements of each lane of a.
Sourcepub fn widening_mul_i16x16(self, a: i16x16, b: i16x16) -> (i16x16, i16x16)
pub fn widening_mul_i16x16(self, a: i16x16, b: i16x16) -> (i16x16, i16x16)
Multiplies the elements of each lane of a and b, and returns separately the low and
high bits of the result.
Sourcepub fn widening_mul_i32x8(self, a: i32x8, b: i32x8) -> (i32x8, i32x8)
pub fn widening_mul_i32x8(self, a: i32x8, b: i32x8) -> (i32x8, i32x8)
Multiplies the elements of each lane of a and b, and returns separately the low and
high bits of the result.
Sourcepub fn widening_mul_u16x16(self, a: u16x16, b: u16x16) -> (u16x16, u16x16)
pub fn widening_mul_u16x16(self, a: u16x16, b: u16x16) -> (u16x16, u16x16)
Multiplies the elements of each lane of a and b, and returns separately the low and
high bits of the result.
Sourcepub fn widening_mul_u32x8(self, a: u32x8, b: u32x8) -> (u32x8, u32x8)
pub fn widening_mul_u32x8(self, a: u32x8, b: u32x8) -> (u32x8, u32x8)
Multiplies the elements of each lane of a and b, and returns separately the low and
high bits of the result.
Sourcepub fn wrapping_add_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn wrapping_add_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_add_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn wrapping_add_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_add_i64x4(self, a: i64x4, b: i64x4) -> i64x4
pub fn wrapping_add_i64x4(self, a: i64x4, b: i64x4) -> i64x4
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_add_i8x32(self, a: i8x32, b: i8x32) -> i8x32
pub fn wrapping_add_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_add_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn wrapping_add_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_add_u32x8(self, a: u32x8, b: u32x8) -> u32x8
pub fn wrapping_add_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_add_u64x4(self, a: u64x4, b: u64x4) -> u64x4
pub fn wrapping_add_u64x4(self, a: u64x4, b: u64x4) -> u64x4
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_add_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn wrapping_add_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Adds the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_mul_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn wrapping_mul_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Multiplies the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_mul_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn wrapping_mul_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Multiplies the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_mul_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn wrapping_mul_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Multiplies the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_mul_u32x8(self, a: u32x8, b: u32x8) -> u32x8
pub fn wrapping_mul_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Multiplies the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn wrapping_sub_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_i32x8(self, a: i32x8, b: i32x8) -> i32x8
pub fn wrapping_sub_i32x8(self, a: i32x8, b: i32x8) -> i32x8
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_i64x4(self, a: i64x4, b: i64x4) -> i64x4
pub fn wrapping_sub_i64x4(self, a: i64x4, b: i64x4) -> i64x4
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_i8x32(self, a: i8x32, b: i8x32) -> i8x32
pub fn wrapping_sub_i8x32(self, a: i8x32, b: i8x32) -> i8x32
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn wrapping_sub_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_u32x8(self, a: u32x8, b: u32x8) -> u32x8
pub fn wrapping_sub_u32x8(self, a: u32x8, b: u32x8) -> u32x8
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_u64x4(self, a: u64x4, b: u64x4) -> u64x4
pub fn wrapping_sub_u64x4(self, a: u64x4, b: u64x4) -> u64x4
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn wrapping_sub_u8x32(self, a: u8x32, b: u8x32) -> u8x32
pub fn wrapping_sub_u8x32(self, a: u8x32, b: u8x32) -> u8x32
Subtracts the elements of each lane of a and b, with wrapping on overflow.
Sourcepub fn xor_i16x16(self, a: i16x16, b: i16x16) -> i16x16
pub fn xor_i16x16(self, a: i16x16, b: i16x16) -> i16x16
Returns a ^ b for each bit in a and b.
Sourcepub fn xor_m16x16(self, a: m16x16, b: m16x16) -> m16x16
pub fn xor_m16x16(self, a: m16x16, b: m16x16) -> m16x16
Returns a ^ b for each bit in a and b.
Sourcepub fn xor_u16x16(self, a: u16x16, b: u16x16) -> u16x16
pub fn xor_u16x16(self, a: u16x16, b: u16x16) -> u16x16
Returns a ^ b for each bit in a and b.
Trait Implementations§
Source§impl Simd for V3
impl Simd for V3
Source§unsafe fn mask_load_ptr_c32s(
self,
mask: MemMask<Self::m32s>,
ptr: *const c32,
) -> Self::c32s
unsafe fn mask_load_ptr_c32s( self, mask: MemMask<Self::m32s>, ptr: *const c32, ) -> Self::c32s
§Safety
See the trait-level safety documentation.
Source§unsafe fn mask_load_ptr_c64s(
self,
mask: MemMask<Self::m64s>,
ptr: *const c64,
) -> Self::c64s
unsafe fn mask_load_ptr_c64s( self, mask: MemMask<Self::m64s>, ptr: *const c64, ) -> Self::c64s
§Safety
See the trait-level safety documentation.
Source§unsafe fn mask_load_ptr_u32s(
self,
mask: MemMask<Self::m32s>,
ptr: *const u32,
) -> Self::u32s
unsafe fn mask_load_ptr_u32s( self, mask: MemMask<Self::m32s>, ptr: *const u32, ) -> Self::u32s
§Safety
See the trait-level safety documentation.
Source§unsafe fn mask_load_ptr_u64s(
self,
mask: MemMask<Self::m64s>,
ptr: *const u64,
) -> Self::u64s
unsafe fn mask_load_ptr_u64s( self, mask: MemMask<Self::m64s>, ptr: *const u64, ) -> Self::u64s
§Safety
See the trait-level safety documentation.
Source§unsafe fn mask_store_ptr_c32s(
self,
mask: MemMask<Self::m32s>,
ptr: *mut c32,
values: Self::c32s,
)
unsafe fn mask_store_ptr_c32s( self, mask: MemMask<Self::m32s>, ptr: *mut c32, values: Self::c32s, )
§Safety
See the trait-level safety documentation.
Source§unsafe fn mask_store_ptr_c64s(
self,
mask: MemMask<Self::m64s>,
ptr: *mut c64,
values: Self::c64s,
)
unsafe fn mask_store_ptr_c64s( self, mask: MemMask<Self::m64s>, ptr: *mut c64, values: Self::c64s, )
§Safety
See the trait-level safety documentation.
Source§unsafe fn mask_store_ptr_u32s(
self,
mask: MemMask<Self::m32s>,
ptr: *mut u32,
values: Self::u32s,
)
unsafe fn mask_store_ptr_u32s( self, mask: MemMask<Self::m32s>, ptr: *mut u32, values: Self::u32s, )
§Safety
See the trait-level safety documentation.
Source§unsafe fn mask_store_ptr_u64s(
self,
mask: MemMask<Self::m64s>,
ptr: *mut u64,
values: Self::u64s,
)
unsafe fn mask_store_ptr_u64s( self, mask: MemMask<Self::m64s>, ptr: *mut u64, values: Self::u64s, )
§Safety
See the trait-level safety documentation.
const REGISTER_COUNT: usize = 16usize
type c32s = f32x8
type c64s = f64x4
type f32s = f32x8
type f64s = f64x4
type i32s = i32x8
type i64s = i64x4
type m32s = m32x8
type m64s = m64x4
type u32s = u32x8
type u64s = u64x4
Source§fn abs2_c32s(self, a: Self::c32s) -> Self::c32s
fn abs2_c32s(self, a: Self::c32s) -> Self::c32s
Source§fn abs2_c64s(self, a: Self::c64s) -> Self::c64s
fn abs2_c64s(self, a: Self::c64s) -> Self::c64s
Source§fn abs_max_c32s(self, a: Self::c32s) -> Self::c32s
fn abs_max_c32s(self, a: Self::c32s) -> Self::c32s
Source§fn abs_max_c64s(self, a: Self::c64s) -> Self::c64s
fn abs_max_c64s(self, a: Self::c64s) -> Self::c64s
fn add_c32s(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn add_c64s(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn add_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn add_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn add_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn add_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn and_m32s(self, a: Self::m32s, b: Self::m32s) -> Self::m32s
fn and_m64s(self, a: Self::m64s, b: Self::m64s) -> Self::m64s
fn and_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn and_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn conj_c32s(self, a: Self::c32s) -> Self::c32s
fn conj_c64s(self, a: Self::c64s) -> Self::c64s
fn conj_mul_add_c32s( self, a: Self::c32s, b: Self::c32s, c: Self::c32s, ) -> Self::c32s
fn conj_mul_add_c64s( self, a: Self::c64s, b: Self::c64s, c: Self::c64s, ) -> Self::c64s
fn conj_mul_c32s(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn conj_mul_c64s(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn deinterleave_shfl_f32s<T: Interleave>(self, values: T) -> T
fn deinterleave_shfl_f64s<T: Interleave>(self, values: T) -> T
fn div_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn div_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn equal_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn equal_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn greater_than_or_equal_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn greater_than_or_equal_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn greater_than_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn greater_than_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn interleave_shfl_f32s<T: Interleave>(self, values: T) -> T
fn interleave_shfl_f64s<T: Interleave>(self, values: T) -> T
fn less_than_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn less_than_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn less_than_or_equal_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn less_than_or_equal_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn less_than_or_equal_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn less_than_or_equal_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn less_than_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::m32s
fn less_than_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::m64s
fn mask_between_m32s(self, start: u32, end: u32) -> MemMask<Self::m32s>
fn mask_between_m64s(self, start: u64, end: u64) -> MemMask<Self::m64s>
fn max_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn max_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn min_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn min_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn mul_add_c32s(self, a: Self::c32s, b: Self::c32s, c: Self::c32s) -> Self::c32s
fn mul_add_c64s(self, a: Self::c64s, b: Self::c64s, c: Self::c64s) -> Self::c64s
fn mul_add_e_f32s( self, a: Self::f32s, b: Self::f32s, c: Self::f32s, ) -> Self::f32s
fn mul_add_e_f64s( self, a: Self::f64s, b: Self::f64s, c: Self::f64s, ) -> Self::f64s
fn mul_add_f32s(self, a: Self::f32s, b: Self::f32s, c: Self::f32s) -> Self::f32s
fn mul_add_f64s(self, a: Self::f64s, b: Self::f64s, c: Self::f64s) -> Self::f64s
fn mul_c32s(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn mul_c64s(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn mul_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn mul_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn neg_c32s(self, a: Self::c32s) -> Self::c32s
fn neg_c64s(self, a: Self::c64s) -> Self::c64s
fn not_m32s(self, a: Self::m32s) -> Self::m32s
fn not_m64s(self, a: Self::m64s) -> Self::m64s
fn not_u32s(self, a: Self::u32s) -> Self::u32s
fn not_u64s(self, a: Self::u64s) -> Self::u64s
fn or_m32s(self, a: Self::m32s, b: Self::m32s) -> Self::m32s
fn or_m64s(self, a: Self::m64s, b: Self::m64s) -> Self::m64s
fn or_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn or_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn partial_load_u32s(self, slice: &[u32]) -> Self::u32s
fn partial_load_u64s(self, slice: &[u64]) -> Self::u64s
fn partial_store_u32s(self, slice: &mut [u32], values: Self::u32s)
fn partial_store_u64s(self, slice: &mut [u64], values: Self::u64s)
fn reduce_max_c32s(self, a: Self::c32s) -> c32
fn reduce_max_c64s(self, a: Self::c64s) -> c64
fn reduce_max_f32s(self, a: Self::f32s) -> f32
fn reduce_max_f64s(self, a: Self::f64s) -> f64
fn reduce_min_c32s(self, a: Self::c32s) -> c32
fn reduce_min_c64s(self, a: Self::c64s) -> c64
fn reduce_min_f32s(self, a: Self::f32s) -> f32
fn reduce_min_f64s(self, a: Self::f64s) -> f64
fn reduce_product_f32s(self, a: Self::f32s) -> f32
fn reduce_product_f64s(self, a: Self::f64s) -> f64
fn reduce_sum_c32s(self, a: Self::c32s) -> c32
fn reduce_sum_c64s(self, a: Self::c64s) -> c64
fn reduce_sum_f32s(self, a: Self::f32s) -> f32
fn reduce_sum_f64s(self, a: Self::f64s) -> f64
fn rotate_right_c32s(self, a: Self::c32s, amount: usize) -> Self::c32s
fn rotate_right_c64s(self, a: Self::c64s, amount: usize) -> Self::c64s
fn rotate_right_u32s(self, a: Self::u32s, amount: usize) -> Self::u32s
fn rotate_right_u64s(self, a: Self::u64s, amount: usize) -> Self::u64s
fn select_u32s_m32s( self, mask: Self::m32s, if_true: Self::u32s, if_false: Self::u32s, ) -> Self::u32s
fn select_u64s_m64s( self, mask: Self::m64s, if_true: Self::u64s, if_false: Self::u64s, ) -> Self::u64s
fn splat_c32s(self, value: c32) -> Self::c32s
fn splat_c64s(self, value: c64) -> Self::c64s
fn splat_f32s(self, value: f32) -> Self::f32s
fn splat_f64s(self, value: f64) -> Self::f64s
fn splat_u32s(self, value: u32) -> Self::u32s
fn splat_u64s(self, value: u64) -> Self::u64s
fn sub_c32s(self, a: Self::c32s, b: Self::c32s) -> Self::c32s
fn sub_c64s(self, a: Self::c64s, b: Self::c64s) -> Self::c64s
fn sub_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn sub_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn sub_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn sub_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn swap_re_im_c32s(self, a: Self::c32s) -> Self::c32s
fn swap_re_im_c64s(self, a: Self::c64s) -> Self::c64s
fn vectorize<Op: WithSimd>(self, op: Op) -> Op::Output
fn widening_mul_u32s( self, a: Self::u32s, b: Self::u32s, ) -> (Self::u32s, Self::u32s)
fn wrapping_dyn_shl_u32s(self, a: Self::u32s, amount: Self::u32s) -> Self::u32s
fn wrapping_dyn_shr_u32s(self, a: Self::u32s, amount: Self::u32s) -> Self::u32s
fn xor_m32s(self, a: Self::m32s, b: Self::m32s) -> Self::m32s
fn xor_m64s(self, a: Self::m64s, b: Self::m64s) -> Self::m64s
fn xor_u32s(self, a: Self::u32s, b: Self::u32s) -> Self::u32s
fn xor_u64s(self, a: Self::u64s, b: Self::u64s) -> Self::u64s
fn greater_than_or_equal_i32s(self, a: Self::i32s, b: Self::i32s) -> Self::m32s
fn greater_than_or_equal_i64s(self, a: Self::i64s, b: Self::i64s) -> Self::m64s
fn greater_than_i32s(self, a: Self::i32s, b: Self::i32s) -> Self::m32s
fn greater_than_i64s(self, a: Self::i64s, b: Self::i64s) -> Self::m64s
fn less_than_or_equal_i32s(self, a: Self::i32s, b: Self::i32s) -> Self::m32s
fn less_than_or_equal_i64s(self, a: Self::i64s, b: Self::i64s) -> Self::m64s
fn less_than_i32s(self, a: Self::i32s, b: Self::i32s) -> Self::m32s
fn less_than_i64s(self, a: Self::i64s, b: Self::i64s) -> Self::m64s
const IS_SCALAR: bool = false
const U64_LANES: usize = _
const I64_LANES: usize = _
const F64_LANES: usize = _
const C64_LANES: usize = _
const U32_LANES: usize = _
const I32_LANES: usize = _
const F32_LANES: usize = _
const C32_LANES: usize = _
fn abs_f32s(self, a: Self::f32s) -> Self::f32s
fn abs_f64s(self, a: Self::f64s) -> Self::f64s
fn add_i32s(self, a: Self::i32s, b: Self::i32s) -> Self::i32s
fn add_i64s(self, a: Self::i64s, b: Self::i64s) -> Self::i64s
fn and_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::f32s
fn and_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::f64s
fn and_i32s(self, a: Self::i32s, b: Self::i32s) -> Self::i32s
fn and_i64s(self, a: Self::i64s, b: Self::i64s) -> Self::i64s
fn as_mut_rsimd_c32s(slice: &mut [c32]) -> (&mut [c32], &mut [Self::c32s])
fn as_mut_rsimd_c64s(slice: &mut [c64]) -> (&mut [c64], &mut [Self::c64s])
fn as_mut_rsimd_f32s(slice: &mut [f32]) -> (&mut [f32], &mut [Self::f32s])
fn as_mut_rsimd_f64s(slice: &mut [f64]) -> (&mut [f64], &mut [Self::f64s])
fn as_mut_rsimd_i32s(slice: &mut [i32]) -> (&mut [i32], &mut [Self::i32s])
fn as_mut_rsimd_i64s(slice: &mut [i64]) -> (&mut [i64], &mut [Self::i64s])
fn as_mut_rsimd_u32s(slice: &mut [u32]) -> (&mut [u32], &mut [Self::u32s])
fn as_mut_rsimd_u64s(slice: &mut [u64]) -> (&mut [u64], &mut [Self::u64s])
fn as_mut_simd_c32s(slice: &mut [c32]) -> (&mut [Self::c32s], &mut [c32])
fn as_mut_simd_c64s(slice: &mut [c64]) -> (&mut [Self::c64s], &mut [c64])
fn as_mut_simd_f32s(slice: &mut [f32]) -> (&mut [Self::f32s], &mut [f32])
fn as_mut_simd_f64s(slice: &mut [f64]) -> (&mut [Self::f64s], &mut [f64])
fn as_mut_simd_i32s(slice: &mut [i32]) -> (&mut [Self::i32s], &mut [i32])
fn as_mut_simd_i64s(slice: &mut [i64]) -> (&mut [Self::i64s], &mut [i64])
fn as_mut_simd_u32s(slice: &mut [u32]) -> (&mut [Self::u32s], &mut [u32])
fn as_mut_simd_u64s(slice: &mut [u64]) -> (&mut [Self::u64s], &mut [u64])
fn as_rsimd_c32s(slice: &[c32]) -> (&[c32], &[Self::c32s])
fn as_rsimd_c64s(slice: &[c64]) -> (&[c64], &[Self::c64s])
fn as_rsimd_f32s(slice: &[f32]) -> (&[f32], &[Self::f32s])
fn as_rsimd_f64s(slice: &[f64]) -> (&[f64], &[Self::f64s])
fn as_rsimd_i32s(slice: &[i32]) -> (&[i32], &[Self::i32s])
fn as_rsimd_i64s(slice: &[i64]) -> (&[i64], &[Self::i64s])
fn as_rsimd_u32s(slice: &[u32]) -> (&[u32], &[Self::u32s])
fn as_rsimd_u64s(slice: &[u64]) -> (&[u64], &[Self::u64s])
fn as_simd_c32s(slice: &[c32]) -> (&[Self::c32s], &[c32])
fn as_simd_c64s(slice: &[c64]) -> (&[Self::c64s], &[c64])
fn as_simd_f32s(slice: &[f32]) -> (&[Self::f32s], &[f32])
fn as_simd_f64s(slice: &[f64]) -> (&[Self::f64s], &[f64])
fn as_simd_i32s(slice: &[i32]) -> (&[Self::i32s], &[i32])
fn as_simd_i64s(slice: &[i64]) -> (&[Self::i64s], &[i64])
fn as_simd_u32s(slice: &[u32]) -> (&[Self::u32s], &[u32])
fn as_simd_u64s(slice: &[u64]) -> (&[Self::u64s], &[u64])
fn as_uninit_mut_rsimd_c32s( slice: &mut [MaybeUninit<c32>], ) -> (&mut [MaybeUninit<c32>], &mut [MaybeUninit<Self::c32s>])
fn as_uninit_mut_rsimd_c64s( slice: &mut [MaybeUninit<c64>], ) -> (&mut [MaybeUninit<c64>], &mut [MaybeUninit<Self::c64s>])
fn as_uninit_mut_rsimd_f32s( slice: &mut [MaybeUninit<f32>], ) -> (&mut [MaybeUninit<f32>], &mut [MaybeUninit<Self::f32s>])
fn as_uninit_mut_rsimd_f64s( slice: &mut [MaybeUninit<f64>], ) -> (&mut [MaybeUninit<f64>], &mut [MaybeUninit<Self::f64s>])
fn as_uninit_mut_rsimd_i32s( slice: &mut [MaybeUninit<i32>], ) -> (&mut [MaybeUninit<i32>], &mut [MaybeUninit<Self::i32s>])
fn as_uninit_mut_rsimd_i64s( slice: &mut [MaybeUninit<i64>], ) -> (&mut [MaybeUninit<i64>], &mut [MaybeUninit<Self::i64s>])
fn as_uninit_mut_rsimd_u32s( slice: &mut [MaybeUninit<u32>], ) -> (&mut [MaybeUninit<u32>], &mut [MaybeUninit<Self::u32s>])
fn as_uninit_mut_rsimd_u64s( slice: &mut [MaybeUninit<u64>], ) -> (&mut [MaybeUninit<u64>], &mut [MaybeUninit<Self::u64s>])
fn as_uninit_mut_simd_c32s( slice: &mut [MaybeUninit<c32>], ) -> (&mut [MaybeUninit<Self::c32s>], &mut [MaybeUninit<c32>])
fn as_uninit_mut_simd_c64s( slice: &mut [MaybeUninit<c64>], ) -> (&mut [MaybeUninit<Self::c64s>], &mut [MaybeUninit<c64>])
fn as_uninit_mut_simd_f32s( slice: &mut [MaybeUninit<f32>], ) -> (&mut [MaybeUninit<Self::f32s>], &mut [MaybeUninit<f32>])
fn as_uninit_mut_simd_f64s( slice: &mut [MaybeUninit<f64>], ) -> (&mut [MaybeUninit<Self::f64s>], &mut [MaybeUninit<f64>])
fn as_uninit_mut_simd_i32s( slice: &mut [MaybeUninit<i32>], ) -> (&mut [MaybeUninit<Self::i32s>], &mut [MaybeUninit<i32>])
fn as_uninit_mut_simd_i64s( slice: &mut [MaybeUninit<i64>], ) -> (&mut [MaybeUninit<Self::i64s>], &mut [MaybeUninit<i64>])
fn as_uninit_mut_simd_u32s( slice: &mut [MaybeUninit<u32>], ) -> (&mut [MaybeUninit<Self::u32s>], &mut [MaybeUninit<u32>])
fn as_uninit_mut_simd_u64s( slice: &mut [MaybeUninit<u64>], ) -> (&mut [MaybeUninit<Self::u64s>], &mut [MaybeUninit<u64>])
Source§fn conj_mul_add_e_c32s(
self,
a: Self::c32s,
b: Self::c32s,
c: Self::c32s,
) -> Self::c32s
fn conj_mul_add_e_c32s( self, a: Self::c32s, b: Self::c32s, c: Self::c32s, ) -> Self::c32s
conj(a) * b + c
Source§fn conj_mul_add_e_c64s(
self,
a: Self::c64s,
b: Self::c64s,
c: Self::c64s,
) -> Self::c64s
fn conj_mul_add_e_c64s( self, a: Self::c64s, b: Self::c64s, c: Self::c64s, ) -> Self::c64s
conj(a) * b + c
fn first_true_m32s(self, mask: Self::m32s) -> usize
fn first_true_m64s(self, mask: Self::m64s) -> usize
fn greater_than_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn greater_than_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
fn greater_than_or_equal_f32s(self, a: Self::f32s, b: Self::f32s) -> Self::m32s
fn greater_than_or_equal_f64s(self, a: Self::f64s, b: Self::f64s) -> Self::m64s
Source§unsafe fn mask_load_ptr_f32s(
self,
mask: MemMask<Self::m32s>,
ptr: *const f32,
) -> Self::f32s
unsafe fn mask_load_ptr_f32s( self, mask: MemMask<Self::m32s>, ptr: *const f32, ) -> Self::f32s
Source§unsafe fn mask_load_ptr_f64s(
self,
mask: MemMask<Self::m64s>,
ptr: *const f64,
) -> Self::f64s
unsafe fn mask_load_ptr_f64s( self, mask: MemMask<Self::m64s>, ptr: *const f64, ) -> Self::f64s
Source§unsafe fn mask_load_ptr_i32s(
self,
mask: MemMask<Self::m32s>,
ptr: *const i32,
) -> Self::i32s
unsafe fn mask_load_ptr_i32s( self, mask: MemMask<Self::m32s>, ptr: *const i32, ) -> Self::i32s
Source§unsafe fn mask_load_ptr_i64s(
self,
mask: MemMask<Self::m64s>,
ptr: *const i64,
) -> Self::i64s
unsafe fn mask_load_ptr_i64s( self, mask: MemMask<Self::m64s>, ptr: *const i64, ) -> Self::i64s
Source§unsafe fn mask_store_ptr_f32s(
self,
mask: MemMask<Self::m32s>,
ptr: *mut f32,
values: Self::f32s,
)
unsafe fn mask_store_ptr_f32s( self, mask: MemMask<Self::m32s>, ptr: *mut f32, values: Self::f32s, )
Source§unsafe fn mask_store_ptr_f64s(
self,
mask: MemMask<Self::m64s>,
ptr: *mut f64,
values: Self::f64s,
)
unsafe fn mask_store_ptr_f64s( self, mask: MemMask<Self::m64s>, ptr: *mut f64, values: Self::f64s, )
Source§unsafe fn mask_store_ptr_i32s(
self,
mask: MemMask<Self::m32s>,
ptr: *mut i32,
values: Self::i32s,
)
unsafe fn mask_store_ptr_i32s( self, mask: MemMask<Self::m32s>, ptr: *mut i32, values: Self::i32s, )
Source§unsafe fn mask_store_ptr_i64s(
self,
mask: MemMask<Self::m64s>,
ptr: *mut i64,
values: Self::i64s,
)
unsafe fn mask_store_ptr_i64s( self, mask: MemMask<Self::m64s>, ptr: *mut i64, values: Self::i64s, )
Source§fn mul_add_e_c32s(
self,
a: Self::c32s,
b: Self::c32s,
c: Self::c32s,
) -> Self::c32s
fn mul_add_e_c32s( self, a: Self::c32s, b: Self::c32s, c: Self::c32s, ) -> Self::c32s
a * b + c
Source§fn mul_add_e_c64s(
self,
a: Self::c64s,
b: Self::c64s,
c: Self::c64s,
) -> Self::c64s
fn mul_add_e_c64s( self, a: Self::c64s, b: Self::c64s, c: Self::c64s, ) -> Self::c64s
a * b + c